Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetoot...
author	David S. Miller <davem@davemloft.net>
	Mon, 14 Dec 2015 21:23:10 +0000 (16:23 -0500)
committer	David S. Miller <davem@davemloft.net>
	Mon, 14 Dec 2015 21:23:10 +0000 (16:23 -0500)
Johan Hedberg says:

====================
pull request: bluetooth-next 2015-12-11

Here's another set of Bluetooth & 802.15.4 patches for the 4.5 kernel:

 - 6LoWPAN debugfs support
 - New driver for the ADF7242 IEEE 802.15.4 MAC
 - Initial code for 6LoWPAN Generic Header Compression (GHC) support
 - Refactor Bluetooth LE scan & advertising behind a dedicated workqueue
 - Cleanups to the Bluetooth H:5 HCI driver
 - Support for Toshiba Broadcom-based Bluetooth controllers
 - Use continuous scanning when establishing Bluetooth LE connections

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
108 files changed:
Documentation/devicetree/bindings/net/micrel-ksz90x1.txt
MAINTAINERS
drivers/net/ethernet/3com/3c509.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/cavium/thunder/nic.h
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/q_struct.h
drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/Makefile
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.h
drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/flow_table.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/geneve.c
drivers/net/hyperv/netvsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/wan/wanxl.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
include/linux/mlx5/driver.h
include/linux/mlx5/flow_table.h [deleted file]
include/linux/mlx5/fs.h [new file with mode: 0644]
include/linux/mlx5/mlx5_ifc.h
include/linux/soc/ti/knav_dma.h
include/net/checksum.h
include/uapi/linux/if_link.h
kernel/bpf/inode.c
kernel/cgroup.c
net/ipv6/addrconf.c
net/iucv/af_iucv.c
net/mpls/mpls_iptunnel.c

index 692076fda0e589788567a5283d79525f04f01852..f9c32adab5c6414b6d17577a37e31245e5fa30a7 100644 (file)
@@ -1,8 +1,9 @@
 Micrel KSZ9021/KSZ9031 Gigabit Ethernet PHY
 
-Some boards require special tuning values, particularly when it comes to
-clock delays. You can specify clock delay values by adding
-micrel-specific properties to an Ethernet OF device node.
+Some boards require special tuning values, particularly when it comes
+to clock delays. You can specify clock delay values in the PHY OF
+device node. Deprecated, but still supported, these properties can
+also be added to an Ethernet OF device node.
 
 Note that these settings are applied after any phy-specific fixup from
 phy_fixup_list (see phy_init_hw() from drivers/net/phy/phy_device.c),
@@ -57,16 +58,6 @@ KSZ9031:
 
 Examples:
 
-       /* Attach to an Ethernet device with autodetected PHY */
-       &enet {
-               rxc-skew-ps = <3000>;
-               rxdv-skew-ps = <0>;
-               txc-skew-ps = <3000>;
-               txen-skew-ps = <0>;
-               status = "okay";
-       };
-
-       /* Attach to an explicitly-specified PHY */
        mdio {
                phy0: ethernet-phy@0 {
                        rxc-skew-ps = <3000>;
index 4366c53dbb2b051679d23e7abca6f8a641b7b07b..04e8d181b44c55c78ef2f57f25ae268e33ebfa5b 100644 (file)
@@ -9134,7 +9134,7 @@ F:        drivers/s390/block/dasd*
 F:     block/partitions/ibm.c
 
 S390 NETWORK DRIVERS
-M:     Ursula Braun <ursula.braun@de.ibm.com>
+M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
@@ -9164,7 +9164,7 @@ S:        Supported
 F:     drivers/s390/scsi/zfcp_*
 
 S390 IUCV NETWORK LAYER
-M:     Ursula Braun <ursula.braun@de.ibm.com>
+M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
 L:     linux-s390@vger.kernel.org
 W:     http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
index 4547a1b8b958bab25ed6cd73ccd3d0b62112dfaa..7677c745fb30b38bf9371e7ada66c87f14bd4184 100644 (file)
@@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev)
 }
 
 #ifdef CONFIG_EISA
-static int __init el3_eisa_probe (struct device *device)
+static int el3_eisa_probe(struct device *device)
 {
        short i;
        int ioaddr, irq, if_port;
index 2839af00f20cd1618f6522b49db652499359ff30..1c5f3b273e6accb7d1e4f41611158eb9253d3d0d 100644 (file)
@@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = {
 };
 MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
 
-static int __init vortex_eisa_probe(struct device *device)
+static int vortex_eisa_probe(struct device *device)
 {
        void __iomem *ioaddr;
        struct eisa_device *edev;
index 39ca6744a4e68fe317f93e1318e729509c1b978e..688828865c48253d5f96e0bd4cab4c315a635e37 100644 (file)
@@ -265,6 +265,7 @@ struct nicvf {
        u8                      tns_mode:1;
        u8                      sqs_mode:1;
        u8                      loopback_supported:1;
+       bool                    hw_tso;
        u16                     mtu;
        struct queue_set        *qs;
 #define        MAX_SQS_PER_VF_SINGLE_NODE              5
@@ -489,6 +490,11 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
        return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
 }
 
+static inline bool pass1_silicon(struct pci_dev *pdev)
+{
+       return pdev->revision < 8;
+}
+
 int nicvf_set_real_num_queues(struct net_device *netdev,
                              int tx_queues, int rx_queues);
 int nicvf_open(struct net_device *netdev);
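pass1_silicon() moves out of nic_main.c, where it took a struct nicpf, into this shared header keyed off the generic struct pci_dev, so the VF driver can make the same silicon-revision check as the PF. A minimal sketch of the intent (nicvf_detect_tso() is a hypothetical name; the revision-8 cutoff is verbatim from the moved helper), matching the nicvf_probe() hunk further down:

	/* Sketch: one pass-1 check now shared by the PF and VF drivers. */
	static void nicvf_detect_tso(struct nicvf *nic)
	{
		/* pass-1 parts (revision < 8) lack usable hardware TSO */
		nic->hw_tso = !pass1_silicon(nic->pdev);
	}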
index 4b7fd63ae57c1d26ab54d9359297c56f35c6105c..9f80de4d501604043a0a2c051847f4eaddaa66bc 100644 (file)
@@ -55,11 +55,6 @@ struct nicpf {
        bool                    irq_allocated[NIC_PF_MSIX_VECTORS];
 };
 
-static inline bool pass1_silicon(struct nicpf *nic)
-{
-       return nic->pdev->revision < 8;
-}
-
 /* Supported devices */
 static const struct pci_device_id nic_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
@@ -123,7 +118,7 @@ static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
         * when PF writes to MBOX(1), in next revisions when
         * PF writes to MBOX(0)
         */
-       if (pass1_silicon(nic)) {
+       if (pass1_silicon(nic->pdev)) {
                /* see the comment for nic_reg_write()/nic_reg_read()
                 * functions above
                 */
@@ -400,7 +395,7 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
                        padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
 
                /* Leave RSS_SIZE as '0' to disable RSS */
-               if (pass1_silicon(nic)) {
+               if (pass1_silicon(nic->pdev)) {
                        nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
                                      (vnic << 24) | (padd << 16) |
                                      (rssi_base + rssi));
@@ -470,7 +465,7 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
        }
 
        cpi_base = nic->cpi_base[cfg->vf_id];
-       if (pass1_silicon(nic))
+       if (pass1_silicon(nic->pdev))
                idx_addr = NIC_PF_CPI_0_2047_CFG;
        else
                idx_addr = NIC_PF_MPI_0_2047_CFG;
index dde8dc720cd3f3b7d513776662a4f05b1241486d..c24cb2a86a42f997c31536947ff0ad7f37bd577c 100644 (file)
@@ -525,14 +525,22 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
                   __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
                   cqe_tx->sqe_ptr, hdr->subdesc_cnt);
 
-       nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
        skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
-       /* For TSO offloaded packets only one head SKB needs to be freed */
+       /* For TSO offloaded packets only one SQE will have a valid SKB */
        if (skb) {
+               nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
                prefetch(skb);
                dev_consume_skb_any(skb);
                sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
+       } else {
+               /* In case of HW TSO, HW sends a CQE for each segment of a TSO
+                * packet instead of a single CQE for the whole TSO packet
+                * transmitted. Each of this CQE points to the same SQE, so
+                * avoid freeing same SQE multiple times.
+                */
+               if (!nic->hw_tso)
+                       nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
        }
 }
 
@@ -1549,6 +1557,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 
+       if (!pass1_silicon(nic->pdev))
+               nic->hw_tso = true;
+
        netdev->netdev_ops = &nicvf_netdev_ops;
        netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
 
index 1fbd9084333e89e542e00bf592747b7000301a1d..d0d1b54900610046955a390f6f5c87ce45cbf1df 100644 (file)
@@ -299,7 +299,7 @@ static int nicvf_init_cmp_queue(struct nicvf *nic,
                return err;
 
        cq->desc = cq->dmem.base;
-       cq->thresh = CMP_QUEUE_CQE_THRESH;
+       cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
        nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
 
        return 0;
@@ -925,7 +925,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
 {
        int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
 
-       if (skb_shinfo(skb)->gso_size) {
+       if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
                subdesc_cnt = nicvf_tso_count_subdescs(skb);
                return subdesc_cnt;
        }
@@ -940,7 +940,7 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
  * First subdescriptor for every send descriptor.
  */
 static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
                         int subdesc_cnt, struct sk_buff *skb, int len)
 {
        int proto;
@@ -976,6 +976,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
                        break;
                }
        }
+
+       if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+               hdr->tso = 1;
+               hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+               /* For non-tunneled pkts, point this to L2 ethertype */
+               hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+               nic->drv_stats.tx_tso++;
+       }
 }
 
 /* SQ GATHER subdescriptor
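The offset arithmetic in the new TSO block deserves a picture: tso_start is the first payload byte (the end of the TCP header), and for non-tunneled frames inner_l3_offset is backed up two bytes from the IP header so that it lands on the EtherType, the last two bytes of the L2 header. A sketch of the layout being encoded (assuming plain Ethernet + IPv4 + TCP, no tunneling):

	/*  | dst mac | src mac | ethertype | IP hdr | TCP hdr | payload...
	 *                      ^           ^        ^         ^
	 *                      |           |        |         +- tso_start =
	 *                      |           |        |            transport off
	 *                      |           |        |            + tcp_hdrlen()
	 *                      |           |        +- skb_transport_offset()
	 *                      |           +- skb_network_offset()
	 *                      +- inner_l3_offset = skb_network_offset() - 2
	 *
	 *  tso_max_paysize = gso_size, the payload bytes per emitted segment.
	 */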
@@ -1045,7 +1054,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
                        data_left -= size;
                        tso_build_data(skb, &tso, size);
                }
-               nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+               nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
                                         seg_subdescs - 1, skb, seg_len);
                sq->skbuff[hdr_qentry] = (u64)NULL;
                qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1098,11 +1107,12 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
        qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
        /* Check if its a TSO packet */
-       if (skb_shinfo(skb)->gso_size)
+       if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
                return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
        /* Add SQ header subdesc */
-       nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+       nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+                                skb, skb->len);
 
        /* Add SQ gather subdescs */
        qentry = nicvf_get_nxt_sqentry(sq, qentry);
index a4f6667fdbd44ad01245849377ad994bfb33c9a8..c5030a7f213ae57e9799142958e5d4fe64a8fbf5 100644 (file)
@@ -75,7 +75,7 @@
  */
 #define CMP_QSIZE              CMP_QUEUE_SIZE2
 #define CMP_QUEUE_LEN          (1ULL << (CMP_QSIZE + 10))
-#define CMP_QUEUE_CQE_THRESH   0
+#define CMP_QUEUE_CQE_THRESH   (NAPI_POLL_WEIGHT / 2)
 #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
 
 #define RBDR_SIZE              RBDR_SIZE0
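Read together with the nicvf_init_cmp_queue() change above, this coalesces completion interrupts to half a NAPI budget on pass-2+ silicon, while pass-1 parts keep a threshold of 0, i.e. an interrupt per CQE; presumably count-based coalescing is not usable on pass-1, though the diff does not spell that out. The combined selection, as a sketch (helper name hypothetical):

	static u64 nicvf_cq_thresh(struct nicvf *nic)
	{
		return pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
	}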
index 3c1de97b1add9425f47477a9db04e740ea3d0c96..9e6d9876bfd0488361cc130b90fa7297fb9db0fb 100644 (file)
@@ -545,25 +545,28 @@ struct sq_hdr_subdesc {
        u64    subdesc_cnt:8;
        u64    csum_l4:2;
        u64    csum_l3:1;
-       u64    rsvd0:5;
+       u64    csum_inner_l4:2;
+       u64    csum_inner_l3:1;
+       u64    rsvd0:2;
        u64    l4_offset:8;
        u64    l3_offset:8;
        u64    rsvd1:4;
        u64    tot_len:20; /* W0 */
 
-       u64    tso_sdc_cont:8;
-       u64    tso_sdc_first:8;
-       u64    tso_l4_offset:8;
-       u64    tso_flags_last:12;
-       u64    tso_flags_first:12;
-       u64    rsvd2:2;
+       u64    rsvd2:24;
+       u64    inner_l4_offset:8;
+       u64    inner_l3_offset:8;
+       u64    tso_start:8;
+       u64    rsvd3:2;
        u64    tso_max_paysize:14; /* W1 */
 #elif defined(__LITTLE_ENDIAN_BITFIELD)
        u64    tot_len:20;
        u64    rsvd1:4;
        u64    l3_offset:8;
        u64    l4_offset:8;
-       u64    rsvd0:5;
+       u64    rsvd0:2;
+       u64    csum_inner_l3:1;
+       u64    csum_inner_l4:2;
        u64    csum_l3:1;
        u64    csum_l4:2;
        u64    subdesc_cnt:8;
@@ -574,12 +577,11 @@ struct sq_hdr_subdesc {
        u64    subdesc_type:4; /* W0 */
 
        u64    tso_max_paysize:14;
-       u64    rsvd2:2;
-       u64    tso_flags_first:12;
-       u64    tso_flags_last:12;
-       u64    tso_l4_offset:8;
-       u64    tso_sdc_first:8;
-       u64    tso_sdc_cont:8; /* W1 */
+       u64    rsvd3:2;
+       u64    tso_start:8;
+       u64    inner_l3_offset:8;
+       u64    inner_l4_offset:8;
+       u64    rsvd2:24; /* W1 */
 #endif
 };
 
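The W1 fields appear in both branches because C allocates bitfields in host order while the hardware expects one fixed layout: the list is declared MSB-first under __BIG_ENDIAN_BITFIELD and mirrored LSB-first under __LITTLE_ENDIAN_BITFIELD, so e.g. tso_max_paysize lands in bits 13..0 of W1 either way. An annotated sketch of the big-endian branch (bit positions inferred from the field widths):

	u64 rsvd2:24;			/* W1 bits 63..40 */
	u64 inner_l4_offset:8;		/* W1 bits 39..32 */
	u64 inner_l3_offset:8;		/* W1 bits 31..24 */
	u64 tso_start:8;		/* W1 bits 23..16 */
	u64 rsvd3:2;			/* W1 bits 15..14 */
	u64 tso_max_paysize:14;		/* W1 bits 13..0  */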
index c308429dd9c7fa0aebf2cee3b951f71f3863d939..d288dcf6062f850f40230ac6de6224e8a70ca37c 100644 (file)
@@ -118,6 +118,11 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
                        ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
                        if (ret) {
                                write_unlock_bh(&ctbl->lock);
+                               dev_err(adap->pdev_dev,
+                                       "CLIP FW cmd failed with error %d, "
+                                       "Connections using %pI6c wont be "
+                                       "Connections using %pI6c won't be "
+                                       ret, ce->addr6.sin6_addr.s6_addr);
                                return ret;
                        }
                } else {
@@ -127,6 +132,9 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
                }
        } else {
                write_unlock_bh(&ctbl->lock);
+               dev_info(adap->pdev_dev, "CLIP table overflow, "
+                        "Connections using %pI6c won't be offloaded",
+                        (void *)lip);
                return -ENOMEM;
        }
        write_unlock_bh(&ctbl->lock);
@@ -146,6 +154,9 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
        int hash;
        int ret = -1;
 
+       if (!ctbl)
+               return;
+
        hash = clip_addr_hash(ctbl, addr, v6);
 
        read_lock_bh(&ctbl->lock);
index 0d147610a06f13819bd1425984f6ec6b3c34e81a..edd706e739fbf035adee4dacd24fc86650ba56fa 100644 (file)
@@ -4865,15 +4865,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
 #if IS_ENABLED(CONFIG_IPV6)
-       adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
-                                         adapter->clipt_end);
-       if (!adapter->clipt) {
-               /* We tolerate a lack of clip_table, giving up
-                * some functionality
+       if ((CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) &&
+           (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
+               /* CLIP functionality is not present in hardware,
+                * hence disable all offload features
                 */
                dev_warn(&pdev->dev,
-                        "could not allocate Clip table, continuing\n");
+                        "CLIP not enabled in hardware, continuing\n");
                adapter->params.offload = 0;
+       } else {
+               adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+                                                 adapter->clipt_end);
+               if (!adapter->clipt) {
+                       /* We tolerate a lack of clip_table, giving up
+                        * some functionality
+                        */
+                       dev_warn(&pdev->dev,
+                                "could not allocate Clip table, continuing\n");
+                       adapter->params.offload = 0;
+               }
        }
 #endif
        if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
index fc3044c8ac1c9a5e824c02208c63294b6a0ec056..91b52a21a2e7f16cfae0bb536638ee6b61cabf9f 100644 (file)
 #define HASHEN_V(x) ((x) << HASHEN_S)
 #define HASHEN_F    HASHEN_V(1U)
 
+#define ASLIPCOMPEN_S    17
+#define ASLIPCOMPEN_V(x) ((x) << ASLIPCOMPEN_S)
+#define ASLIPCOMPEN_F    ASLIPCOMPEN_V(1U)
+
 #define REQQPARERR_S    16
 #define REQQPARERR_V(x) ((x) << REQQPARERR_S)
 #define REQQPARERR_F    REQQPARERR_V(1U)
index 8966f3159bb2b1da3640e7fe9fd2b579b97457e4..3acde3b9b767c4c025b57130c0e497e62b6cd46c 100644 (file)
@@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev)
 
 static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
 
-static int __init de4x5_eisa_probe (struct device *gendev)
+static int de4x5_eisa_probe(struct device *gendev)
 {
        struct eisa_device *edev;
        u_long iobase;
index ae6e30d39f0f6b07ac8b9b2fec78bd9eb250e535..1d5c3e16d8f4f4ad1f3ba580ee3a4a9c48663551 100644 (file)
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
 }
 
 #ifdef CONFIG_EISA
-static int __init hp100_eisa_probe (struct device *gendev)
+static int hp100_eisa_probe(struct device *gendev)
 {
        struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
        struct eisa_device *edev = to_eisa_device(gendev);
index 69707108d23cdeb57bda5c6dd5e3e1aa29faa2b6..98fe5a2cd6e3e1020ac1c346c63e6a21a5a6832a 100644 (file)
@@ -213,8 +213,11 @@ struct e1000_rx_ring {
 };
 
 #define E1000_DESC_UNUSED(R)                                           \
-       ((((R)->next_to_clean > (R)->next_to_use)                       \
-         ? 0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1)
+({                                                                     \
+       unsigned int clean = smp_load_acquire(&(R)->next_to_clean);     \
+       unsigned int use = READ_ONCE((R)->next_to_use);                 \
+       (clean > use ? 0 : (R)->count) + clean - use - 1;               \
+})
 
 #define E1000_RX_DESC_EXT(R, i)                                                \
        (&(((union e1000_rx_desc_extended *)((R).desc))[i]))
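The rewritten macro is a memory-ordering fix, not just a tidy-up: the smp_load_acquire() on next_to_clean only buys anything if the ring-clean path publishes that index with a matching smp_store_release() once it has finished recycling descriptors. That release side is not in this hunk, so the pairing below is the assumed shape:

	/* Assumed acquire/release pairing; the release half lives in the
	 * ring clean path, which this hunk does not show. */

	/* cleaner, after recycling all buffers up to index i: */
	smp_store_release(&ring->next_to_clean, i);

	/* producer, computing free descriptors: */
	unsigned int clean = smp_load_acquire(&ring->next_to_clean);
	unsigned int use = READ_ONCE(ring->next_to_use);
	unsigned int free = (clean > use ? 0 : ring->count) + clean - use - 1;

	/* The acquire ensures the frees implied by `clean` are visible
	 * before the count is trusted; READ_ONCE() merely prevents a torn
	 * or refetched read of the producer's own index. */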
index b1af0d613caabc55f7598b63201f59ca4c27c0e6..8172cf08cc330eb9d4cc61044513a5aed79825e9 100644 (file)
@@ -1,5 +1,5 @@
 /*******************************************************************************
-
+*
   Intel PRO/1000 Linux driver
   Copyright(c) 1999 - 2006 Intel Corporation.
 
@@ -106,7 +106,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
            120, 120
 };
 
-static DEFINE_SPINLOCK(e1000_eeprom_lock);
+static DEFINE_MUTEX(e1000_eeprom_lock);
 static DEFINE_SPINLOCK(e1000_phy_lock);
 
 /**
@@ -624,8 +624,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
                /* Workaround for PCI-X problem when BIOS sets MMRBC
                 * incorrectly.
                 */
-               if (hw->bus_type == e1000_bus_type_pcix
-                   && e1000_pcix_get_mmrbc(hw) > 2048)
+               if (hw->bus_type == e1000_bus_type_pcix &&
+                   e1000_pcix_get_mmrbc(hw) > 2048)
                        e1000_pcix_set_mmrbc(hw, 2048);
                break;
        }
@@ -683,10 +683,9 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
        }
 
        ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1,
-                                   &eeprom_data);
-       if (ret_val) {
+                                   &eeprom_data);
+       if (ret_val)
                return ret_val;
-       }
 
        if (eeprom_data != EEPROM_RESERVED_WORD) {
                /* Adjust SERDES output amplitude only. */
@@ -1074,8 +1073,8 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
 
        if (hw->mac_type <= e1000_82543 ||
            hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
-           hw->mac_type == e1000_82541_rev_2
-           || hw->mac_type == e1000_82547_rev_2)
+           hw->mac_type == e1000_82541_rev_2 ||
+           hw->mac_type == e1000_82547_rev_2)
                hw->phy_reset_disable = false;
 
        return E1000_SUCCESS;
@@ -1652,7 +1651,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
                mii_1000t_ctrl_reg = 0;
        } else {
                ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
-                                             mii_1000t_ctrl_reg);
+                                             mii_1000t_ctrl_reg);
                if (ret_val)
                        return ret_val;
        }
@@ -1881,10 +1880,11 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
                if (ret_val)
                        return ret_val;
 
-               if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543)
-                   && (!hw->autoneg)
-                   && (hw->forced_speed_duplex == e1000_10_full
-                       || hw->forced_speed_duplex == e1000_10_half)) {
+               if ((hw->mac_type == e1000_82544 ||
+                    hw->mac_type == e1000_82543) &&
+                   (!hw->autoneg) &&
+                   (hw->forced_speed_duplex == e1000_10_full ||
+                    hw->forced_speed_duplex == e1000_10_half)) {
                        ret_val = e1000_polarity_reversal_workaround(hw);
                        if (ret_val)
                                return ret_val;
@@ -2084,11 +2084,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
         * so we had to force link.  In this case, we need to force the
         * configuration of the MAC to match the "fc" parameter.
         */
-       if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed))
-           || ((hw->media_type == e1000_media_type_internal_serdes)
-               && (hw->autoneg_failed))
-           || ((hw->media_type == e1000_media_type_copper)
-               && (!hw->autoneg))) {
+       if (((hw->media_type == e1000_media_type_fiber) &&
+            (hw->autoneg_failed)) ||
+           ((hw->media_type == e1000_media_type_internal_serdes) &&
+            (hw->autoneg_failed)) ||
+           ((hw->media_type == e1000_media_type_copper) &&
+            (!hw->autoneg))) {
                ret_val = e1000_force_mac_fc(hw);
                if (ret_val) {
                        e_dbg("Error forcing flow control settings\n");
@@ -2193,8 +2194,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                        else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                                 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                                 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
-                       {
+                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                                hw->fc = E1000_FC_TX_PAUSE;
                                e_dbg
                                    ("Flow Control = TX PAUSE frames only.\n");
@@ -2210,8 +2210,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
                        else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
                                 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
                                 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
-                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
-                       {
+                                (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                                hw->fc = E1000_FC_RX_PAUSE;
                                e_dbg
                                    ("Flow Control = RX PAUSE frames only.\n");
@@ -2460,10 +2459,11 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
                         * happen due to the execution of this workaround.
                         */
 
-                       if ((hw->mac_type == e1000_82544
-                            || hw->mac_type == e1000_82543) && (!hw->autoneg)
-                           && (hw->forced_speed_duplex == e1000_10_full
-                               || hw->forced_speed_duplex == e1000_10_half)) {
+                       if ((hw->mac_type == e1000_82544 ||
+                            hw->mac_type == e1000_82543) &&
+                           (!hw->autoneg) &&
+                           (hw->forced_speed_duplex == e1000_10_full ||
+                            hw->forced_speed_duplex == e1000_10_half)) {
                                ew32(IMC, 0xffffffff);
                                ret_val =
                                    e1000_polarity_reversal_workaround(hw);
@@ -2528,8 +2528,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
                 */
                if (hw->tbi_compatibility_en) {
                        u16 speed, duplex;
+
                        ret_val =
                            e1000_get_speed_and_duplex(hw, &speed, &duplex);
+
                        if (ret_val) {
                                e_dbg
                                    ("Error getting link speed and duplex\n");
@@ -2628,10 +2630,10 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
                            e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
                        if (ret_val)
                                return ret_val;
-                       if ((*speed == SPEED_100
-                            && !(phy_data & NWAY_LPAR_100TX_FD_CAPS))
-                           || (*speed == SPEED_10
-                               && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                       if ((*speed == SPEED_100 &&
+                            !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+                           (*speed == SPEED_10 &&
+                            !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
                                *duplex = HALF_DUPLEX;
                }
        }
@@ -2664,9 +2666,9 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
                ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
                if (ret_val)
                        return ret_val;
-               if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+               if (phy_data & MII_SR_AUTONEG_COMPLETE)
                        return E1000_SUCCESS;
-               }
+
                msleep(100);
        }
        return E1000_SUCCESS;
@@ -2803,11 +2805,11 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
        return data;
 }
 
-
 /**
  * e1000_read_phy_reg - read a phy register
  * @hw: Struct containing variables accessed by shared code
  * @reg_addr: address of the PHY register to read
+ * @phy_data: pointer to the value on the PHY register
  *
  * Reads the value from a PHY register, if the value is on a specific non zero
  * page, sets the page first.
@@ -2823,14 +2825,13 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
            (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
                ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
                                                 (u16) reg_addr);
-               if (ret_val) {
-                       spin_unlock_irqrestore(&e1000_phy_lock, flags);
-                       return ret_val;
-               }
+               if (ret_val)
+                       goto out;
        }
 
        ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
                                        phy_data);
+out:
        spin_unlock_irqrestore(&e1000_phy_lock, flags);
 
        return ret_val;
@@ -2881,7 +2882,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                                e_dbg("MDI Read Error\n");
                                return -E1000_ERR_PHY;
                        }
-                       *phy_data = (u16) mdic;
+                       *phy_data = (u16)mdic;
                } else {
                        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
                                (phy_addr << E1000_MDIC_PHY_SHIFT) |
@@ -2906,7 +2907,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                                e_dbg("MDI Error\n");
                                return -E1000_ERR_PHY;
                        }
-                       *phy_data = (u16) mdic;
+                       *phy_data = (u16)mdic;
                }
        } else {
                /* We must first send a preamble through the MDIO pin to signal
@@ -2960,7 +2961,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
        if ((hw->phy_type == e1000_phy_igp) &&
            (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
                ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
-                                                (u16) reg_addr);
+                                                (u16)reg_addr);
                if (ret_val) {
                        spin_unlock_irqrestore(&e1000_phy_lock, flags);
                        return ret_val;
@@ -2993,7 +2994,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                 * the desired data.
                 */
                if (hw->mac_type == e1000_ce4100) {
-                       mdic = (((u32) phy_data) |
+                       mdic = (((u32)phy_data) |
                                (reg_addr << E1000_MDIC_REG_SHIFT) |
                                (phy_addr << E1000_MDIC_PHY_SHIFT) |
                                (INTEL_CE_GBE_MDIC_OP_WRITE) |
@@ -3015,7 +3016,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                                return -E1000_ERR_PHY;
                        }
                } else {
-                       mdic = (((u32) phy_data) |
+                       mdic = (((u32)phy_data) |
                                (reg_addr << E1000_MDIC_REG_SHIFT) |
                                (phy_addr << E1000_MDIC_PHY_SHIFT) |
                                (E1000_MDIC_OP_WRITE));
@@ -3053,7 +3054,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
                mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
                        (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
                mdic <<= 16;
-               mdic |= (u32) phy_data;
+               mdic |= (u32)phy_data;
 
                e1000_shift_out_mdi_bits(hw, mdic, 32);
        }
@@ -3176,14 +3177,14 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
-       hw->phy_id = (u32) (phy_id_high << 16);
+       hw->phy_id = (u32)(phy_id_high << 16);
        udelay(20);
        ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
        if (ret_val)
                return ret_val;
 
-       hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
-       hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
+       hw->phy_id |= (u32)(phy_id_low & PHY_REVISION_MASK);
+       hw->phy_revision = (u32)phy_id_low & ~PHY_REVISION_MASK;
 
        switch (hw->mac_type) {
        case e1000_82543:
@@ -3401,7 +3402,6 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
                phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >>
                                       SR_1000T_REMOTE_RX_STATUS_SHIFT) ?
                    e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
-
        }
 
        return E1000_SUCCESS;
@@ -3449,7 +3449,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
        if (hw->phy_type == e1000_phy_igp)
                return e1000_phy_igp_get_info(hw, phy_info);
        else if ((hw->phy_type == e1000_phy_8211) ||
-                (hw->phy_type == e1000_phy_8201))
+                (hw->phy_type == e1000_phy_8201))
                return E1000_SUCCESS;
        else
                return e1000_phy_m88_get_info(hw, phy_info);
@@ -3611,11 +3611,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
         */
        mask = 0x01 << (count - 1);
        eecd = er32(EECD);
-       if (eeprom->type == e1000_eeprom_microwire) {
+       if (eeprom->type == e1000_eeprom_microwire)
                eecd &= ~E1000_EECD_DO;
-       } else if (eeprom->type == e1000_eeprom_spi) {
+       else if (eeprom->type == e1000_eeprom_spi)
                eecd |= E1000_EECD_DO;
-       }
+
        do {
                /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
                 * "1", and then raising and then lowering the clock (the SK bit
@@ -3851,7 +3851,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
        do {
                e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
                                        hw->eeprom.opcode_bits);
-               spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8);
+               spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
                if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
                        break;
 
@@ -3882,9 +3882,10 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
 s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
        s32 ret;
-       spin_lock(&e1000_eeprom_lock);
+
+       mutex_lock(&e1000_eeprom_lock);
        ret = e1000_do_read_eeprom(hw, offset, words, data);
-       spin_unlock(&e1000_eeprom_lock);
+       mutex_unlock(&e1000_eeprom_lock);
        return ret;
 }
 
@@ -3896,15 +3897,16 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
        if (hw->mac_type == e1000_ce4100) {
                GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
-                                     data);
+                                     data);
                return E1000_SUCCESS;
        }
 
        /* A check for invalid values:  offset too large, too many words, and
         * not enough words.
         */
-       if ((offset >= eeprom->word_size)
-           || (words > eeprom->word_size - offset) || (words == 0)) {
+       if ((offset >= eeprom->word_size) ||
+           (words > eeprom->word_size - offset) ||
+           (words == 0)) {
                e_dbg("\"words\" parameter out of bounds. Words = %d,"
                      "size = %d\n", offset, eeprom->word_size);
                return -E1000_ERR_EEPROM;
@@ -3940,7 +3942,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
                /* Send the READ command (opcode + addr)  */
                e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
-               e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
+               e1000_shift_out_ee_bits(hw, (u16)(offset * 2),
                                        eeprom->address_bits);
 
                /* Read the data.  The address of the eeprom internally
@@ -3960,7 +3962,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
                        e1000_shift_out_ee_bits(hw,
                                                EEPROM_READ_OPCODE_MICROWIRE,
                                                eeprom->opcode_bits);
-                       e1000_shift_out_ee_bits(hw, (u16) (offset + i),
+                       e1000_shift_out_ee_bits(hw, (u16)(offset + i),
                                                eeprom->address_bits);
 
                        /* Read the data.  For microwire, each word requires the
@@ -3968,6 +3970,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
                         */
                        data[i] = e1000_shift_in_ee_bits(hw, 16);
                        e1000_standby_eeprom(hw);
+                       cond_resched();
                }
        }
 
@@ -4004,7 +4007,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
                return E1000_SUCCESS;
 
 #endif
-       if (checksum == (u16) EEPROM_SUM)
+       if (checksum == (u16)EEPROM_SUM)
                return E1000_SUCCESS;
        else {
                e_dbg("EEPROM Checksum Invalid\n");
@@ -4031,7 +4034,7 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
                }
                checksum += eeprom_data;
        }
-       checksum = (u16) EEPROM_SUM - checksum;
+       checksum = (u16)EEPROM_SUM - checksum;
        if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
                e_dbg("EEPROM Write Error\n");
                return -E1000_ERR_EEPROM;
@@ -4052,9 +4055,10 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
 s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
        s32 ret;
-       spin_lock(&e1000_eeprom_lock);
+
+       mutex_lock(&e1000_eeprom_lock);
        ret = e1000_do_write_eeprom(hw, offset, words, data);
-       spin_unlock(&e1000_eeprom_lock);
+       mutex_unlock(&e1000_eeprom_lock);
        return ret;
 }
 
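The DEFINE_SPINLOCK to DEFINE_MUTEX conversion near the top of this file and the cond_resched() calls added to the EEPROM loops are two halves of one change: cond_resched() may sleep, and sleeping under a spinlock is illegal, so making the slow bit-banged EEPROM transfers preemption-friendly forces the lock to become a mutex. In outline (loop body abbreviated; the rationale is inferred rather than stated in the diff):

	mutex_lock(&e1000_eeprom_lock);		/* sleeping is now allowed */
	for (i = 0; i < words; i++) {
		/* ...bit-bang one 16-bit word to or from the EEPROM... */
		e1000_standby_eeprom(hw);
		cond_resched();	/* yield between words: legal under a
				 * mutex, a bug under the old spinlock */
	}
	mutex_unlock(&e1000_eeprom_lock);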
@@ -4066,15 +4070,16 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
 
        if (hw->mac_type == e1000_ce4100) {
                GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
-                                      data);
+                                      data);
                return E1000_SUCCESS;
        }
 
        /* A check for invalid values:  offset too large, too many words, and
         * not enough words.
         */
-       if ((offset >= eeprom->word_size)
-           || (words > eeprom->word_size - offset) || (words == 0)) {
+       if ((offset >= eeprom->word_size) ||
+           (words > eeprom->word_size - offset) ||
+           (words == 0)) {
                e_dbg("\"words\" parameter out of bounds\n");
                return -E1000_ERR_EEPROM;
        }
@@ -4116,6 +4121,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
                        return -E1000_ERR_EEPROM;
 
                e1000_standby_eeprom(hw);
+               cond_resched();
 
                /*  Send the WRITE ENABLE command (8 bit opcode )  */
                e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
@@ -4132,7 +4138,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
                /* Send the Write command (8-bit opcode + addr) */
                e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
 
-               e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2),
+               e1000_shift_out_ee_bits(hw, (u16)((offset + widx) * 2),
                                        eeprom->address_bits);
 
                /* Send the data */
@@ -4142,6 +4148,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
                 */
                while (widx < words) {
                        u16 word_out = data[widx];
+
                        word_out = (word_out >> 8) | (word_out << 8);
                        e1000_shift_out_ee_bits(hw, word_out, 16);
                        widx++;
@@ -4183,9 +4190,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
         * EEPROM into write/erase mode.
         */
        e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
-                               (u16) (eeprom->opcode_bits + 2));
+                               (u16)(eeprom->opcode_bits + 2));
 
-       e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+       e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
 
        /* Prepare the EEPROM */
        e1000_standby_eeprom(hw);
@@ -4195,7 +4202,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
                e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
                                        eeprom->opcode_bits);
 
-               e1000_shift_out_ee_bits(hw, (u16) (offset + words_written),
+               e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
                                        eeprom->address_bits);
 
                /* Send the data */
@@ -4224,6 +4231,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
 
                /* Recover from write */
                e1000_standby_eeprom(hw);
+               cond_resched();
 
                words_written++;
        }
@@ -4235,9 +4243,9 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
         * EEPROM out of write/erase mode.
         */
        e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
-                               (u16) (eeprom->opcode_bits + 2));
+                               (u16)(eeprom->opcode_bits + 2));
 
-       e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2));
+       e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
 
        return E1000_SUCCESS;
 }
@@ -4260,8 +4268,8 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
                        e_dbg("EEPROM Read Error\n");
                        return -E1000_ERR_EEPROM;
                }
-               hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
-               hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8);
+               hw->perm_mac_addr[i] = (u8)(eeprom_data & 0x00FF);
+               hw->perm_mac_addr[i + 1] = (u8)(eeprom_data >> 8);
        }
 
        switch (hw->mac_type) {
@@ -4328,19 +4336,19 @@ u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
                 */
        case 0:
                /* [47:36] i.e. 0x563 for above example address */
-               hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
+               hash_value = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
                break;
        case 1:
                /* [46:35] i.e. 0xAC6 for above example address */
-               hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
+               hash_value = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
                break;
        case 2:
                /* [45:34] i.e. 0x5D8 for above example address */
-               hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
+               hash_value = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
                break;
        case 3:
                /* [43:32] i.e. 0x634 for above example address */
-               hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
+               hash_value = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
                break;
        }
 
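Each case selects a different 12-bit window of the 48-bit multicast address. Working case 0 through by hand, using the example address the in-line comments refer to (01:AA:00:12:34:56 in the surrounding source, not visible in this hunk, so treat it as an assumption):

	/* mc_addr[] holds the address least-significant byte first, so
	 * mc_addr[4] = 0x34 and mc_addr[5] = 0x56:
	 *
	 *   hash = (0x34 >> 4) | ((u16)0x56 << 4)
	 *        =  0x003      |  0x560
	 *        =  0x563        -- bits [47:36], matching the comment
	 */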
@@ -4361,9 +4369,9 @@ void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
        /* HW expects these in little endian so we reverse the byte order
         * from network order (big endian) to little endian
         */
-       rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
-                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
-       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+       rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+                  ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+       rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
 
        /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
         * unit hang.
@@ -4537,7 +4545,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
                if (ret_val)
                        return ret_val;
                ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
-                                             (u16) (hw->phy_spd_default &
+                                             (u16)(hw->phy_spd_default &
                                                     ~IGP01E1000_GMII_SPD));
                if (ret_val)
                        return ret_val;
@@ -4802,7 +4810,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
 void e1000_update_adaptive(struct e1000_hw *hw)
 {
        if (hw->adaptive_ifs) {
-               if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
+               if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
                        if (hw->tx_packet_delta > MIN_NUM_XMITS) {
                                hw->in_ifs_mode = true;
                                if (hw->current_ifs_val < hw->ifs_max_val) {
@@ -4816,8 +4824,8 @@ void e1000_update_adaptive(struct e1000_hw *hw)
                                }
                        }
                } else {
-                       if (hw->in_ifs_mode
-                           && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                       if (hw->in_ifs_mode &&
+                           (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
                                hw->current_ifs_val = 0;
                                hw->in_ifs_mode = false;
                                ew32(AIT, 0);
@@ -4922,7 +4930,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
 
        /* Use old method for Phy older than IGP */
        if (hw->phy_type == e1000_phy_m88) {
-
                ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
                                             &phy_data);
                if (ret_val)
@@ -4966,7 +4973,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
                };
                /* Read the AGC registers for all channels */
                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
-
                        ret_val =
                            e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
                        if (ret_val)
@@ -4976,8 +4982,8 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
 
                        /* Value bound check. */
                        if ((cur_agc_value >=
-                            IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1)
-                           || (cur_agc_value == 0))
+                            IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+                           (cur_agc_value == 0))
                                return -E1000_ERR_PHY;
 
                        agc_value += cur_agc_value;
@@ -5054,7 +5060,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
                 */
                if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
                    IGP01E1000_PSSR_SPEED_1000MBPS) {
-
                        /* Read the GIG initialization PCS register (0x00B4) */
                        ret_val =
                            e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
@@ -5175,8 +5180,8 @@ static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw)
                                hw->ffe_config_state = e1000_ffe_config_active;
 
                                ret_val = e1000_write_phy_reg(hw,
-                                             IGP01E1000_PHY_DSP_FFE,
-                                             IGP01E1000_PHY_DSP_FFE_CM_CP);
+                                                             IGP01E1000_PHY_DSP_FFE,
+                                                             IGP01E1000_PHY_DSP_FFE_CM_CP);
                                if (ret_val)
                                        return ret_val;
                                break;
@@ -5243,7 +5248,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        msleep(20);
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                   IGP01E1000_IEEE_FORCE_GIGA);
+                                                     IGP01E1000_IEEE_FORCE_GIGA);
                        if (ret_val)
                                return ret_val;
                        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5264,7 +5269,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        }
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                       IGP01E1000_IEEE_RESTART_AUTONEG);
+                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
                        if (ret_val)
                                return ret_val;
 
@@ -5299,7 +5304,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                        msleep(20);
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                                   IGP01E1000_IEEE_FORCE_GIGA);
+                                                     IGP01E1000_IEEE_FORCE_GIGA);
                        if (ret_val)
                                return ret_val;
                        ret_val =
@@ -5309,7 +5314,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
                                return ret_val;
 
                        ret_val = e1000_write_phy_reg(hw, 0x0000,
-                                       IGP01E1000_IEEE_RESTART_AUTONEG);
+                                                     IGP01E1000_IEEE_RESTART_AUTONEG);
                        if (ret_val)
                                return ret_val;
 
@@ -5346,9 +5351,8 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
                ret_val =
                    e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1,
                                      &eeprom_data);
-               if (ret_val) {
+               if (ret_val)
                        return ret_val;
-               }
 
                if ((eeprom_data != EEPROM_RESERVED_WORD) &&
                    (eeprom_data & EEPROM_PHY_CLASS_A)) {
@@ -5395,8 +5399,8 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
         * from the lowest speeds starting from 10Mbps. The capability is used
         * for Dx transitions and states
         */
-       if (hw->mac_type == e1000_82541_rev_2
-           || hw->mac_type == e1000_82547_rev_2) {
+       if (hw->mac_type == e1000_82541_rev_2 ||
+           hw->mac_type == e1000_82547_rev_2) {
                ret_val =
                    e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
                if (ret_val)
@@ -5446,11 +5450,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
                        if (ret_val)
                                return ret_val;
                }
-       } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT)
-                  || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL)
-                  || (hw->autoneg_advertised ==
-                      AUTONEG_ADVERTISE_10_100_ALL)) {
-
+       } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+                  (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) ||
+                  (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
                if (hw->mac_type == e1000_82541_rev_2 ||
                    hw->mac_type == e1000_82547_rev_2) {
                        phy_data |= IGP01E1000_GMII_FLEX_SPD;
@@ -5474,7 +5476,6 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
                                        phy_data);
                if (ret_val)
                        return ret_val;
-
        }
        return E1000_SUCCESS;
 }
@@ -5542,7 +5543,6 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
        return E1000_SUCCESS;
 }
 
-
 /**
  * e1000_enable_mng_pass_thru - check for bmc pass through
  * @hw: Struct containing variables accessed by shared code
index fd7be860c20131e7db9a557dcdd40db348d9edf5..3fc7bde699ba58e2720b4b4039b31fd4cfdeb706 100644 (file)
@@ -99,13 +99,13 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
-                             struct e1000_tx_ring *txdr);
+                                   struct e1000_tx_ring *txdr);
 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
-                             struct e1000_rx_ring *rxdr);
+                                   struct e1000_rx_ring *rxdr);
 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
-                             struct e1000_tx_ring *tx_ring);
+                                   struct e1000_tx_ring *tx_ring);
 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
-                             struct e1000_rx_ring *rx_ring);
+                                   struct e1000_rx_ring *rx_ring);
 void e1000_update_stats(struct e1000_adapter *adapter);
 
 static int e1000_init_module(void);
@@ -122,16 +122,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter);
 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
-                                struct e1000_tx_ring *tx_ring);
+                               struct e1000_tx_ring *tx_ring);
 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
-                                struct e1000_rx_ring *rx_ring);
+                               struct e1000_rx_ring *rx_ring);
 static void e1000_set_rx_mode(struct net_device *netdev);
 static void e1000_update_phy_info_task(struct work_struct *work);
 static void e1000_watchdog(struct work_struct *work);
 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev);
-static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
 static int e1000_set_mac(struct net_device *netdev, void *p);
 static irqreturn_t e1000_intr(int irq, void *data);
@@ -164,7 +164,7 @@ static void e1000_tx_timeout(struct net_device *dev);
 static void e1000_reset_task(struct work_struct *work);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
-                                       struct sk_buff *skb);
+                                      struct sk_buff *skb);
 
 static bool e1000_vlan_used(struct e1000_adapter *adapter);
 static void e1000_vlan_mode(struct net_device *netdev,
@@ -195,7 +195,7 @@ MODULE_PARM_DESC(copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");
 
 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
-                     pci_channel_state_t state);
+                                               pci_channel_state_t state);
 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
 static void e1000_io_resume(struct pci_dev *pdev);
 
@@ -287,7 +287,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
        int err;
 
        err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
-                         netdev);
+                         netdev);
        if (err) {
                e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
        }
@@ -636,8 +636,8 @@ void e1000_reset(struct e1000_adapter *adapter)
                 * but don't include ethernet FCS because hardware appends it
                 */
                min_tx_space = (hw->max_frame_size +
-                               sizeof(struct e1000_tx_desc) -
-                               ETH_FCS_LEN) * 2;
+                               sizeof(struct e1000_tx_desc) -
+                               ETH_FCS_LEN) * 2;
                min_tx_space = ALIGN(min_tx_space, 1024);
                min_tx_space >>= 10;
                /* software strips receive CRC, so leave room for it */
@@ -943,8 +943,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct e1000_adapter *adapter;
        struct e1000_hw *hw;
 
-       static int cards_found = 0;
-       static int global_quad_port_a = 0; /* global ksp3 port a indication */
+       static int cards_found;
+       static int global_quad_port_a; /* global ksp3 port a indication */
        int i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 tmp = 0;
@@ -1046,7 +1046,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (hw->mac_type == e1000_ce4100) {
                hw->ce4100_gbe_mdio_base_virt =
                                        ioremap(pci_resource_start(pdev, BAR_1),
-                                               pci_resource_len(pdev, BAR_1));
+                                               pci_resource_len(pdev, BAR_1));
 
                if (!hw->ce4100_gbe_mdio_base_virt)
                        goto err_mdio_ioremap;
@@ -1148,7 +1148,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                break;
        case e1000_82546:
        case e1000_82546_rev_3:
-               if (er32(STATUS) & E1000_STATUS_FUNC_1){
+               if (er32(STATUS) & E1000_STATUS_FUNC_1) {
                        e1000_read_eeprom(hw,
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
@@ -1199,13 +1199,13 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                for (i = 0; i < 32; i++) {
                        hw->phy_addr = i;
                        e1000_read_phy_reg(hw, PHY_ID2, &tmp);
-                       if (tmp == 0 || tmp == 0xFF) {
-                               if (i == 31)
-                                       goto err_eeprom;
-                               continue;
-                       } else
+
+                       if (tmp != 0 && tmp != 0xFF)
                                break;
                }
+
+               if (i >= 32)
+                       goto err_eeprom;
        }
 
        /* reset the hardware with the new settings */
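
The reworked probe loop above replaces the nested success/failure conditional with a plain scan-then-check: break out on the first PHY_ID2 value that is neither 0x0000 nor 0xFF, then treat an exhausted index as the error case. A minimal, self-contained sketch of the same shape follows; read_id() is an invented stand-in for e1000_read_phy_reg(hw, PHY_ID2, &tmp), not driver code.

    #include <stdio.h>
    #include <stdint.h>

    /* fake register read: pretend a PHY answers at address 5 */
    static uint16_t read_id(int addr)
    {
            return addr == 5 ? 0x0141 : 0xFF;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 32; i++) {
                    uint16_t tmp = read_id(i);

                    if (tmp != 0 && tmp != 0xFF)
                            break;
            }

            if (i >= 32)
                    return 1;       /* the driver's err_eeprom path */

            printf("PHY found at address %d\n", i);
            return 0;
    }
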
@@ -1263,7 +1263,7 @@ err_pci_reg:
  * @pdev: PCI device information struct
  *
  * e1000_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.  The could be caused by a
+ * that it should release a PCI device. That could be caused by a
  * Hot-Plug event, or because the driver is going to be removed from
  * memory.
  **/
@@ -1334,12 +1334,12 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
 static int e1000_alloc_queues(struct e1000_adapter *adapter)
 {
        adapter->tx_ring = kcalloc(adapter->num_tx_queues,
-                                  sizeof(struct e1000_tx_ring), GFP_KERNEL);
+                                  sizeof(struct e1000_tx_ring), GFP_KERNEL);
        if (!adapter->tx_ring)
                return -ENOMEM;
 
        adapter->rx_ring = kcalloc(adapter->num_rx_queues,
-                                  sizeof(struct e1000_rx_ring), GFP_KERNEL);
+                                  sizeof(struct e1000_rx_ring), GFP_KERNEL);
        if (!adapter->rx_ring) {
                kfree(adapter->tx_ring);
                return -ENOMEM;
@@ -1811,20 +1811,20 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
        rctl &= ~E1000_RCTL_SZ_4096;
        rctl |= E1000_RCTL_BSEX;
        switch (adapter->rx_buffer_len) {
-               case E1000_RXBUFFER_2048:
-               default:
-                       rctl |= E1000_RCTL_SZ_2048;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case E1000_RXBUFFER_4096:
-                       rctl |= E1000_RCTL_SZ_4096;
-                       break;
-               case E1000_RXBUFFER_8192:
-                       rctl |= E1000_RCTL_SZ_8192;
-                       break;
-               case E1000_RXBUFFER_16384:
-                       rctl |= E1000_RCTL_SZ_16384;
-                       break;
+       case E1000_RXBUFFER_2048:
+       default:
+               rctl |= E1000_RCTL_SZ_2048;
+               rctl &= ~E1000_RCTL_BSEX;
+               break;
+       case E1000_RXBUFFER_4096:
+               rctl |= E1000_RCTL_SZ_4096;
+               break;
+       case E1000_RXBUFFER_8192:
+               rctl |= E1000_RCTL_SZ_8192;
+               break;
+       case E1000_RXBUFFER_16384:
+               rctl |= E1000_RCTL_SZ_16384;
+               break;
        }
 
        /* This is useful for sniffing bad packets. */
@@ -1861,12 +1861,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
        if (adapter->netdev->mtu > ETH_DATA_LEN) {
                rdlen = adapter->rx_ring[0].count *
-                       sizeof(struct e1000_rx_desc);
+                       sizeof(struct e1000_rx_desc);
                adapter->clean_rx = e1000_clean_jumbo_rx_irq;
                adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
        } else {
                rdlen = adapter->rx_ring[0].count *
-                       sizeof(struct e1000_rx_desc);
+                       sizeof(struct e1000_rx_desc);
                adapter->clean_rx = e1000_clean_rx_irq;
                adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
        }
@@ -2761,7 +2761,9 @@ static int e1000_tso(struct e1000_adapter *adapter,
                buffer_info->time_stamp = jiffies;
                buffer_info->next_to_watch = i;
 
-               if (++i == tx_ring->count) i = 0;
+               if (++i == tx_ring->count)
+                       i = 0;
+
                tx_ring->next_to_use = i;
 
                return true;
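
This hunk and several later ones only unfold the one-line `if (++i == tx_ring->count) i = 0;` onto two lines for kernel style; the underlying idiom is the usual circular-ring index advance. A tiny standalone illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned int count = 4, i = 0;
            int step;

            /* advance a ring index, wrapping to 0 at count */
            for (step = 0; step < 6; step++) {
                    printf("%u ", i);
                    if (++i == count)
                            i = 0;
            }
            printf("\n");           /* prints: 0 1 2 3 0 1 */
            return 0;
    }
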
@@ -2816,7 +2818,9 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
        buffer_info->time_stamp = jiffies;
        buffer_info->next_to_watch = i;
 
-       if (unlikely(++i == tx_ring->count)) i = 0;
+       if (unlikely(++i == tx_ring->count))
+               i = 0;
+
        tx_ring->next_to_use = i;
 
        return true;
@@ -2865,8 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
                 */
                if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
-                               (size > 2015) && count == 0))
-                       size = 2015;
+                            (size > 2015) && count == 0))
+                       size = 2015;
 
                /* Workaround for potential 82544 hang in PCI-X.  Avoid
                 * terminating buffers within evenly-aligned dwords.
@@ -2963,7 +2967,7 @@ dma_error:
                count--;
 
        while (count--) {
-               if (i==0)
+               if (i == 0)
                        i += tx_ring->count;
                i--;
                buffer_info = &tx_ring->buffer_info[i];
@@ -3013,7 +3017,8 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
                tx_desc->lower.data =
                        cpu_to_le32(txd_lower | buffer_info->length);
                tx_desc->upper.data = cpu_to_le32(txd_upper);
-               if (unlikely(++i == tx_ring->count)) i = 0;
+               if (unlikely(++i == tx_ring->count))
+                       i = 0;
        }
 
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
@@ -3101,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev,
        return __e1000_maybe_stop_tx(netdev, size);
 }
 
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
 {
@@ -3841,7 +3846,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
        struct e1000_tx_buffer *buffer_info;
        unsigned int i, eop;
        unsigned int count = 0;
-       unsigned int total_tx_bytes=0, total_tx_packets=0;
+       unsigned int total_tx_bytes = 0, total_tx_packets = 0;
        unsigned int bytes_compl = 0, pkts_compl = 0;
 
        i = tx_ring->next_to_clean;
@@ -3869,14 +3874,18 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
                        e1000_unmap_and_free_tx_resource(adapter, buffer_info);
                        tx_desc->upper.data = 0;
 
-                       if (unlikely(++i == tx_ring->count)) i = 0;
+                       if (unlikely(++i == tx_ring->count))
+                               i = 0;
                }
 
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }
 
-       tx_ring->next_to_clean = i;
+       /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
+        * which will reuse the cleaned buffers.
+        */
+       smp_store_release(&tx_ring->next_to_clean, i);
 
        netdev_completed_queue(netdev, pkts_compl, bytes_compl);
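
The new comment documents a release/acquire pairing: the store to next_to_clean must not become visible before the buffer frees that precede it. The matching acquire side is not part of this hunk; the sketch below assumes an E1000_DESC_UNUSED-style consumer doing an acquire load, and models the kernel's smp_store_release()/smp_load_acquire() with C11 atomics.

    #include <stdatomic.h>
    #include <stdio.h>

    struct ring {
            _Atomic unsigned int next_to_clean;
            unsigned int next_to_use;
            unsigned int count;
    };

    /* cleaner side: buffer frees above this store become visible first */
    static void publish_clean(struct ring *r, unsigned int i)
    {
            atomic_store_explicit(&r->next_to_clean, i, memory_order_release);
    }

    /* transmit side: acquire load before reusing cleaned descriptors */
    static unsigned int desc_unused(struct ring *r)
    {
            unsigned int ntc = atomic_load_explicit(&r->next_to_clean,
                                                    memory_order_acquire);

            return (ntc > r->next_to_use ? 0 : r->count) +
                   ntc - r->next_to_use - 1;
    }

    int main(void)
    {
            struct ring r = { .next_to_use = 2, .count = 8 };

            publish_clean(&r, 2);
            printf("%u descriptors unused\n", desc_unused(&r));  /* 7 */
            return 0;
    }

The unused-descriptor arithmetic mirrors E1000_DESC_UNUSED: one slot is always left empty so a full ring and an empty ring stay distinguishable.
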
 
@@ -3954,9 +3963,11 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
        skb_checksum_none_assert(skb);
 
        /* 82543 or newer only */
-       if (unlikely(hw->mac_type < e1000_82543)) return;
+       if (unlikely(hw->mac_type < e1000_82543))
+               return;
        /* Ignore Checksum bit is set */
-       if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+       if (unlikely(status & E1000_RXD_STAT_IXSM))
+               return;
        /* TCP/UDP checksum error bit is set */
        if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
                /* let the stack verify checksum errors */
@@ -4136,7 +4147,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
        unsigned int i;
        int cleaned_count = 0;
        bool cleaned = false;
-       unsigned int total_rx_bytes=0, total_rx_packets=0;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -4153,7 +4164,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 
                status = rx_desc->status;
 
-               if (++i == rx_ring->count) i = 0;
+               if (++i == rx_ring->count)
+                       i = 0;
+
                next_rxd = E1000_RX_DESC(*rx_ring, i);
                prefetch(next_rxd);
 
@@ -4356,7 +4369,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
        unsigned int i;
        int cleaned_count = 0;
        bool cleaned = false;
-       unsigned int total_rx_bytes=0, total_rx_packets=0;
+       unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 
        i = rx_ring->next_to_clean;
        rx_desc = E1000_RX_DESC(*rx_ring, i);
@@ -4395,7 +4408,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
                        buffer_info->rxbuf.data = NULL;
                }
 
-               if (++i == rx_ring->count) i = 0;
+               if (++i == rx_ring->count)
+                       i = 0;
+
                next_rxd = E1000_RX_DESC(*rx_ring, i);
                prefetch(next_rxd);
 
@@ -4683,9 +4698,11 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
                 * we assume back-to-back
                 */
                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
-               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+                       return;
                e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
-               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+               if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+                       return;
                e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
                if (phy_ctrl & CR_1000T_MS_ENABLE) {
                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
index c9da4654e9cad64b64ee305ea5a2aa37c0906338..b3949d5bef5c3b7e8f86ca2719f1fdf77353ae95 100644 (file)
@@ -91,6 +91,7 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_SPT_I219_V            0x1570  /* SPT PCH */
 #define E1000_DEV_ID_PCH_SPT_I219_LM2          0x15B7  /* SPT-H PCH */
 #define E1000_DEV_ID_PCH_SPT_I219_V2           0x15B8  /* SPT-H PCH */
+#define E1000_DEV_ID_PCH_LBG_I219_LM3          0x15B9  /* LBG PCH */
 
 #define E1000_REVISION_4       4
 
index 91a5a0ae9cd73932648492ce532b0e1260f1419c..a049e30639a13d75cd66185aa225bb3d15847d8e 100644 (file)
@@ -1984,7 +1984,7 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
        int i = 0;
 
        while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
-              (i++ < 10))
+              (i++ < 30))
                usleep_range(10000, 20000);
        return blocked ? E1000_BLK_PHY_RESET : 0;
 }
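
Each poll sleeps 10-20 ms in usleep_range(), so raising the bound from 10 to 30 iterations triples the budget for the firmware to release the PHY reset block, from at most ~0.2 s to ~0.6 s.
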
@@ -3093,24 +3093,45 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
        u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+       u32 nvm_dword = 0;
        u8 sig_byte = 0;
        s32 ret_val;
 
        switch (hw->mac.type) {
-               /* In SPT, read from the CTRL_EXT reg instead of
-                * accessing the sector valid bits from the nvm
-                */
        case e1000_pch_spt:
-               *bank = er32(CTRL_EXT)
-                   & E1000_CTRL_EXT_NVMVS;
-               if ((*bank == 0) || (*bank == 1)) {
-                       e_dbg("ERROR: No valid NVM bank present\n");
-                       return -E1000_ERR_NVM;
-               } else {
-                       *bank = *bank - 2;
+               bank1_offset = nvm->flash_bank_size;
+               act_offset = E1000_ICH_NVM_SIG_WORD;
+
+               /* set bank to 0 in case flash read fails */
+               *bank = 0;
+
+               /* Check bank 0 */
+               ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
+                                                        &nvm_dword);
+               if (ret_val)
+                       return ret_val;
+               sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+               if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+                   E1000_ICH_NVM_SIG_VALUE) {
+                       *bank = 0;
                        return 0;
                }
-               break;
+
+               /* Check bank 1 */
+               ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
+                                                        bank1_offset,
+                                                        &nvm_dword);
+               if (ret_val)
+                       return ret_val;
+               sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+               if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+                   E1000_ICH_NVM_SIG_VALUE) {
+                       *bank = 1;
+                       return 0;
+               }
+
+               e_dbg("ERROR: No valid NVM bank present\n");
+               return -E1000_ERR_NVM;
        case e1000_ich8lan:
        case e1000_ich9lan:
                eecd = er32(EECD);
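
Instead of the CTRL_EXT sector-valid bits, the SPT path now reads the signature word from each flash bank directly and tests bits 15:8 of the returned dword against the valid-signature mask. A host-side sketch of just that test; the mask/value constants are quoted as commonly defined in ich8lan.h and should be treated as illustrative, and the flash read itself is omitted.

    #include <stdio.h>
    #include <stdint.h>

    #define E1000_ICH_NVM_VALID_SIG_MASK    0xC0    /* assumed values */
    #define E1000_ICH_NVM_SIG_VALUE         0x80

    static int bank_has_valid_sig(uint32_t nvm_dword)
    {
            /* the signature byte lives in bits 15:8 of the dword */
            uint8_t sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);

            return (sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
                   E1000_ICH_NVM_SIG_VALUE;
    }

    int main(void)
    {
            printf("%d %d\n",
                   bank_has_valid_sig(0x00008000),   /* 1: sig byte 0x80 */
                   bank_has_valid_sig(0x0000C000));  /* 0: sig byte 0xC0 */
            return 0;
    }
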
index 0a854a47d31a77ef0723231dd75e3aceddc5d6b0..775e38910681a617eee363309fd97e90a445afe9 100644 (file)
@@ -1959,8 +1959,10 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
         * previous interrupt.
         */
        if (rx_ring->set_itr) {
-               writel(1000000000 / (rx_ring->itr_val * 256),
-                      rx_ring->itr_register);
+               u32 itr = rx_ring->itr_val ?
+                         1000000000 / (rx_ring->itr_val * 256) : 0;
+
+               writel(itr, rx_ring->itr_register);
                rx_ring->set_itr = 0;
        }
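
The guard avoids a divide-by-zero when itr_val is 0, i.e. when interrupt moderation is disabled; in that case 0 is written to the ITR register. The computation, isolated and runnable, with one worked value:

    #include <stdio.h>
    #include <stdint.h>

    /* register value from the hunk: 1e9 / (itr_val * 256), or 0 when off */
    static uint32_t itr_reg_val(uint32_t itr_val)
    {
            return itr_val ? 1000000000u / (itr_val * 256u) : 0;
    }

    int main(void)
    {
            /* 488 * 256 = 124928; 1000000000 / 124928 = 8004 */
            printf("%u %u\n", itr_reg_val(488), itr_reg_val(0)); /* 8004 0 */
            return 0;
    }
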
 
@@ -7465,6 +7467,7 @@ static const struct pci_device_id e1000_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt },
+       { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt },
 
        { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
 };
@@ -7504,14 +7507,11 @@ static struct pci_driver e1000_driver = {
  **/
 static int __init e1000_init_module(void)
 {
-       int ret;
-
        pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
                e1000e_driver_version);
        pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
-       ret = pci_register_driver(&e1000_driver);
 
-       return ret;
+       return pci_register_driver(&e1000_driver);
 }
 module_init(e1000_init_module);
 
index 08859dd220a8b69981ecbdb744e8c40f0a07d9e2..b006ff66d028644243393229b5e231ae47301127 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2014 Intel Corporation.
+# Copyright(c) 2013 - 2015 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
 
 obj-$(CONFIG_FM10K) += fm10k.o
 
-fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \
-             fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \
-             fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \
-             fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o
+fm10k-y := fm10k_main.o \
+          fm10k_common.o \
+          fm10k_pci.o \
+          fm10k_ptp.o \
+          fm10k_netdev.o \
+          fm10k_ethtool.o \
+          fm10k_pf.o \
+          fm10k_vf.o \
+          fm10k_mbx.o \
+          fm10k_iov.o \
+          fm10k_tlv.o
+
+fm10k-$(CONFIG_DEBUG_FS) += fm10k_debugfs.o
+fm10k-$(CONFIG_DCB) += fm10k_dcbnl.o
index fa26e20445a530ce8a5b79e7a69609461579ce84..b34bb008b1045a0163fe9b4bb2acb93e7c861f42 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/types.h>
 #include <linux/etherdevice.h>
+#include <linux/cpumask.h>
 #include <linux/rtnetlink.h>
 #include <linux/if_vlan.h>
 #include <linux/pci.h>
@@ -66,6 +67,7 @@ struct fm10k_l2_accel {
 enum fm10k_ring_state_t {
        __FM10K_TX_DETECT_HANG,
        __FM10K_HANG_CHECK_ARMED,
+       __FM10K_TX_XPS_INIT_DONE,
 };
 
 #define check_for_tx_hang(ring) \
@@ -138,7 +140,7 @@ struct fm10k_ring {
                                         * different for DCB and RSS modes
                                         */
        u8 qos_pc;                      /* priority class of queue */
-       u16 vid;                        /* default vlan ID of queue */
+       u16 vid;                        /* default VLAN ID of queue */
        u16 count;                      /* amount of descriptors */
 
        u16 next_to_alloc;
@@ -164,7 +166,7 @@ struct fm10k_ring_container {
        unsigned int total_packets;     /* total packets processed this int */
        u16 work_limit;                 /* total work allowed per interrupt */
        u16 itr;                        /* interrupt throttle rate value */
-       u8 itr_scale;                   /* ITR adjustment scaler based on PCI speed */
+       u8 itr_scale;                   /* ITR adjustment based on PCI speed */
        u8 count;                       /* total number of rings in vector */
 };
 
@@ -209,6 +211,7 @@ struct fm10k_q_vector {
        struct fm10k_ring_container rx, tx;
 
        struct napi_struct napi;
+       cpumask_t affinity_mask;
        char name[IFNAMSIZ + 9];
 
 #ifdef CONFIG_DEBUG_FS
@@ -419,7 +422,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
         (&(((union fm10k_rx_desc *)((R)->desc))[i]))
 
 #define FM10K_MAX_TXD_PWR      14
-#define FM10K_MAX_DATA_PER_TXD (1 << FM10K_MAX_TXD_PWR)
+#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S)       DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
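
With FM10K_MAX_TXD_PWR = 14, a descriptor carries at most BIT(14) = 16384 bytes, and TXD_USE_COUNT() rounds a buffer length up to whole descriptors. A standalone check of the arithmetic; BIT() and DIV_ROUND_UP() are re-derived here only so the example compiles on its own.

    #include <stdio.h>

    #define BIT(n)                  (1UL << (n))
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    #define FM10K_MAX_TXD_PWR       14
    #define FM10K_MAX_DATA_PER_TXD  BIT(FM10K_MAX_TXD_PWR)  /* 16384 */
    #define TXD_USE_COUNT(S)        DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)

    int main(void)
    {
            printf("%lu\n", TXD_USE_COUNT(20000UL));        /* 2 */
            printf("%lu\n", TXD_USE_COUNT(16384UL));        /* 1 */
            return 0;
    }
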
@@ -440,7 +443,7 @@ union fm10k_ftag_info {
        struct {
                /* dglort and sglort combined into a single 32bit desc read */
                __le32 glort;
-               /* upper 16 bits of vlan are reserved 0 for swpri_type_user */
+               /* upper 16 bits of VLAN are reserved 0 for swpri_type_user */
                __le32 vlan;
        } d;
        struct {
@@ -557,5 +560,9 @@ int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 
 /* DCB */
+#ifdef CONFIG_DCB
 void fm10k_dcbnl_set_ops(struct net_device *dev);
+#else
+static inline void fm10k_dcbnl_set_ops(struct net_device *dev) {}
+#endif
 #endif /* _FM10K_H_ */
index 5c7a4d7662d895e42680ebc668ffd941187a8649..2be4361839db2d270e13601b039953293bd96755 100644 (file)
@@ -20,7 +20,6 @@
 
 #include "fm10k.h"
 
-#ifdef CONFIG_DCB
 /**
  * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device
  * @dev: netdev interface for the device
@@ -155,7 +154,6 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = {
        .setdcbx        = fm10k_dcbnl_setdcbx,
 };
 
-#endif /* CONFIG_DCB */
 /**
  * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev
  * @dev: netdev interface for the device
@@ -164,11 +162,9 @@ static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = {
  **/
 void fm10k_dcbnl_set_ops(struct net_device *dev)
 {
-#ifdef CONFIG_DCB
        struct fm10k_intfc *interface = netdev_priv(dev);
        struct fm10k_hw *hw = &interface->hw;
 
        if (hw->mac.type == fm10k_mac_pf)
                dev->dcbnl_ops = &fm10k_dcbnl_ops;
-#endif /* CONFIG_DCB */
 }
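
Together with the Makefile hunk above (fm10k-$(CONFIG_DCB) += fm10k_dcbnl.o) and the static-inline stub added to fm10k.h, this removes the in-function #ifdefs: the object is simply not built when DCB is off, and callers link against an empty inline instead. A toy version of the idiom with invented names, not fm10k's; compile once plain and once with -DCONFIG_DCB.

    #include <stdio.h>

    #ifdef CONFIG_DCB
    void example_set_ops(void)
    {
            printf("dcbnl ops installed\n");
    }
    #else
    /* stub compiled away when the feature is off */
    static inline void example_set_ops(void) {}
    #endif

    int main(void)
    {
            example_set_ops();      /* call site needs no #ifdef */
            return 0;
    }
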
index 5304bc1fbecd4e218497ea7b7680b7b81d964411..5d6137faf7d17e31504d15eda87a814d1e9a132b 100644 (file)
@@ -18,8 +18,6 @@
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  */
 
-#ifdef CONFIG_DEBUG_FS
-
 #include "fm10k.h"
 
 #include <linux/debugfs.h>
@@ -258,5 +256,3 @@ void fm10k_dbg_exit(void)
        debugfs_remove_recursive(dbg_root);
        dbg_root = NULL;
 }
-
-#endif /* CONFIG_DEBUG_FS */
index 109e2111bddaace886413e27646a21d8c4f5b009..2f6a05b572288ad04d223f75706616dcdb822893 100644 (file)
@@ -127,7 +127,7 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
 #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
 
 #define FM10K_QUEUE_STATS_LEN(_n) \
-       ( (_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+       ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
 
 #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
                                FM10K_NETDEV_STATS_LEN + \
@@ -259,7 +259,8 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
                        stats_len += FM10K_DEBUG_STATS_LEN;
 
                        if (iov_data)
-                               stats_len += FM10K_MBX_STATS_LEN * iov_data->num_vfs;
+                               stats_len += FM10K_MBX_STATS_LEN *
+                                       iov_data->num_vfs;
                }
 
                return stats_len;
@@ -298,14 +299,16 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
 
        if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
                for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
-                       p = (char *)interface + fm10k_gstrings_debug_stats[i].stat_offset;
+                       p = (char *)interface +
+                               fm10k_gstrings_debug_stats[i].stat_offset;
                        *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
                                     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                }
        }
 
        for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
-               p = (char *)&interface->hw.mbx + fm10k_gstrings_mbx_stats[i].stat_offset;
+               p = (char *)&interface->hw.mbx +
+                       fm10k_gstrings_mbx_stats[i].stat_offset;
                *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
                        sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
@@ -322,6 +325,7 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
        if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
                for (i = 0; i < iov_data->num_vfs; i++) {
                        struct fm10k_vf_info *vf_info;
+
                        vf_info = &iov_data->vf_info[i];
 
                        /* skip stats if we don't have a vf info */
@@ -331,7 +335,8 @@ static void fm10k_get_ethtool_stats(struct net_device *netdev,
                        }
 
                        for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
-                               p = (char *)&vf_info->mbx + fm10k_gstrings_mbx_stats[j].stat_offset;
+                               p = (char *)&vf_info->mbx +
+                                       fm10k_gstrings_mbx_stats[j].stat_offset;
                                *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
                                             sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
                        }
@@ -1020,7 +1025,6 @@ static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
        return 0;
 }
 
-
 static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
 {
        return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
index 1c17b6284daaa1e4d34b908ed7a7f4a7bbff3988..75ff1092b7ee405cabfd56b5e1c44db4619c5748 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION    "0.15.2-k"
+#define DRV_VERSION    "0.19.3-k"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
 static const char fm10k_driver_string[] =
@@ -917,7 +917,7 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
        /* set timestamping bits */
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
            likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
-                       desc_flags |= FM10K_TXD_FLAG_TIME;
+               desc_flags |= FM10K_TXD_FLAG_TIME;
 
        /* set checksum offload bits */
        desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
@@ -1462,7 +1462,7 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
         * allow the budget to go below 1 because we'll exit polling
         */
        if (q_vector->rx.count > 1)
-               per_ring_budget = max(budget/q_vector->rx.count, 1);
+               per_ring_budget = max(budget / q_vector->rx.count, 1);
        else
                per_ring_budget = budget;
 
@@ -1998,8 +1998,10 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
 
        /* Allocate memory for queues */
        err = fm10k_alloc_q_vectors(interface);
-       if (err)
+       if (err) {
+               fm10k_reset_msix_capability(interface);
                return err;
+       }
 
        /* Map rings to devices, and map devices to physical queues */
        fm10k_assign_rings(interface);
index 2bce47490723ff3d1c355213351b7b5fb07690ea..c7fea47b8909f269382bb9f13dbbab9f4a3742ad 100644 (file)
@@ -903,7 +903,7 @@ static void fm10k_mbx_create_disconnect_hdr(struct fm10k_mbx_info *mbx)
 }
 
 /**
- *  fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mailbox header
+ *  fm10k_mbx_create_fake_disconnect_hdr - Generate a false disconnect mbox hdr
  *  @mbx: pointer to mailbox
  *
  *  This function creates a fake disconnect header for loading into remote
@@ -2140,6 +2140,7 @@ s32 fm10k_sm_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
 {
        mbx->mbx_reg = FM10K_GMBX;
        mbx->mbmem_reg = FM10K_MBMEM_PF(0);
+
        /* start out in closed state */
        mbx->state = FM10K_STATE_CLOSED;
 
index 79f6b7dd23621ec5fb4295e7c53ea0592c934d61..d9854d39576d46281a8fa60f2584d7cad46f890e 100644 (file)
@@ -608,7 +608,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
        unsigned int r_idx = skb->queue_mapping;
        int err;
 
-       if ((skb->protocol ==  htons(ETH_P_8021Q)) &&
+       if ((skb->protocol == htons(ETH_P_8021Q)) &&
            !skb_vlan_tag_present(skb)) {
                /* FM10K only supports hardware tagging, any tags in frame
                 * are considered 2nd level or "outer" tags
@@ -632,7 +632,7 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
 
-               /* locate vlan header */
+               /* locate VLAN header */
                vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
 
                /* pull the 2 key pieces of data out of it */
@@ -705,7 +705,7 @@ static void fm10k_tx_timeout(struct net_device *netdev)
        } else {
                netif_info(interface, drv, netdev,
                           "Fake Tx hang detected with timeout of %d seconds\n",
-                          netdev->watchdog_timeo/HZ);
+                          netdev->watchdog_timeo / HZ);
 
                /* fake Tx hang - increase the kernel timeout */
                if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
@@ -778,7 +778,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
        if (!set)
                clear_bit(vid, interface->active_vlans);
 
-       /* disable the default VID on ring if we have an active VLAN */
+       /* disable the default VLAN ID on ring if we have an active VLAN */
        for (i = 0; i < interface->num_rx_queues; i++) {
                struct fm10k_ring *rx_ring = interface->rx_ring[i];
                u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);
@@ -789,7 +789,9 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
                        rx_ring->vid &= ~FM10K_VLAN_CLEAR;
        }
 
-       /* Do not remove default VID related entries from VLAN and MAC tables */
+       /* Do not remove default VLAN ID related entries from VLAN and MAC
+        * tables
+        */
        if (!set && vid == hw->mac.default_vid)
                return 0;
 
@@ -814,7 +816,7 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
        if (err)
                goto err_out;
 
-       /* set vid prior to syncing/unsyncing the VLAN */
+       /* set VLAN ID prior to syncing/unsyncing the VLAN */
        interface->vid = vid + (set ? VLAN_N_VID : 0);
 
        /* Update the unicast and multicast address list to add/drop VLAN */
index 15d8e10c250485e04097e7abb68b55de8adea0c2..020f6dce41544cd69f21f5bbd2d14df67b01bf6b 100644 (file)
@@ -180,7 +180,8 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
 
        err = fm10k_init_queueing_scheme(interface);
        if (err) {
-               dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+               dev_err(&interface->pdev->dev,
+                       "init_queueing_scheme failed: %d\n", err);
                goto reinit_err;
        }
 
@@ -600,6 +601,13 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
        fm10k_write_reg(hw, FM10K_PFVTCTL(reg_idx),
                        FM10K_PFVTCTL_FTAG_DESC_ENABLE);
 
+       /* Initialize XPS */
+       if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE, &ring->state) &&
+           ring->q_vector)
+               netif_set_xps_queue(ring->netdev,
+                                   &ring->q_vector->affinity_mask,
+                                   ring->queue_index);
+
        /* enable queue */
        fm10k_write_reg(hw, FM10K_TXDCTL(reg_idx), txdctl);
 }
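
test_and_set_bit() on __FM10K_TX_XPS_INIT_DONE makes the XPS mapping a one-shot per ring, so reconfiguration (for example across resets) does not re-register the queue. The same once-only shape, with a C11 atomic_flag standing in for the kernel bitop:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag xps_init_done = ATOMIC_FLAG_INIT;

    static void configure_ring(void)
    {
            /* first caller wins; later calls skip the init */
            if (!atomic_flag_test_and_set(&xps_init_done))
                    printf("netif_set_xps_queue() equivalent runs once\n");
    }

    int main(void)
    {
            configure_ring();
            configure_ring();       /* prints only once */
            return 0;
    }
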
@@ -715,7 +723,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
        /* assign default VLAN to queue */
        ring->vid = hw->mac.default_vid;
 
-       /* if we have an active VLAN, disable default VID */
+       /* if we have an active VLAN, disable default VLAN ID */
        if (test_bit(hw->mac.default_vid, interface->active_vlans))
                ring->vid |= FM10K_VLAN_CLEAR;
 
@@ -919,7 +927,7 @@ void fm10k_netpoll(struct net_device *netdev)
 #endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
-                             struct fm10k_fault *fault)
+                              struct fm10k_fault *fault)
 {
        struct pci_dev *pdev = interface->pdev;
        struct fm10k_hw *hw = &interface->hw;
@@ -1124,6 +1132,10 @@ void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
        struct fm10k_hw *hw = &interface->hw;
        int itr_reg;
 
+       /* no mailbox IRQ to free if MSI-X is not enabled */
+       if (!interface->msix_entries)
+               return;
+
        /* disconnect the mailbox */
        hw->mbx.ops.disconnect(hw, &hw->mbx);
 
@@ -1292,7 +1304,7 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
        if (!fm10k_glort_valid_pf(hw, glort))
                return FM10K_ERR_PARAM;
 
-       /* verify VID is valid */
+       /* verify VLAN ID is valid */
        if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
@@ -1411,14 +1423,14 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
        }
 
        /* Enable interrupts w/ no moderation for "other" interrupts */
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), other_itr);
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), other_itr);
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_SRAM), other_itr);
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_MaxHoldTime), other_itr);
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_VFLR), other_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), other_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), other_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_sram), other_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_max_hold_time), other_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_vflr), other_itr);
 
        /* Enable interrupts w/ moderation for mailbox */
-       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_Mailbox), mbx_itr);
+       fm10k_write_reg(hw, FM10K_INT_MAP(fm10k_int_mailbox), mbx_itr);
 
        /* Enable individual interrupt causes */
        fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
@@ -1446,10 +1458,15 @@ int fm10k_mbx_request_irq(struct fm10k_intfc *interface)
                err = fm10k_mbx_request_irq_pf(interface);
        else
                err = fm10k_mbx_request_irq_vf(interface);
+       if (err)
+               return err;
 
        /* connect mailbox */
-       if (!err)
-               err = hw->mbx.ops.connect(hw, &hw->mbx);
+       err = hw->mbx.ops.connect(hw, &hw->mbx);
+
+       /* if the mailbox failed to connect, then free IRQ */
+       if (err)
+               fm10k_mbx_free_irq(interface);
 
        return err;
 }
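
The reordered flow bails out before touching the mailbox if the IRQ request failed, and frees the IRQ again if the subsequent connect fails, so the function never returns holding half of its resources. A generic sketch of that acquire-then-rollback shape; the fake_* helpers are stand-ins, not kernel APIs.

    #include <stdio.h>

    static int fake_request_irq(void) { return 0; }
    static int fake_connect(void)     { return -1; } /* force the rollback */
    static void fake_free_irq(void)   { printf("irq freed on rollback\n"); }

    static int request_mbx_irq(void)
    {
            int err = fake_request_irq();

            if (err)
                    return err;             /* nothing acquired yet */

            err = fake_connect();
            if (err)
                    fake_free_irq();        /* undo the successful step */

            return err;
    }

    int main(void)
    {
            return request_mbx_irq() ? 1 : 0;
    }
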
@@ -1478,8 +1495,10 @@ void fm10k_qv_free_irq(struct fm10k_intfc *interface)
                if (!q_vector->tx.count && !q_vector->rx.count)
                        continue;
 
-               /* disable interrupts */
+               /* clear the affinity_mask in the IRQ descriptor */
+               irq_set_affinity_hint(entry->vector, NULL);
 
+               /* disable interrupts */
                writel(FM10K_ITR_MASK_SET, q_vector->itr);
 
                free_irq(entry->vector, q_vector);
@@ -1537,6 +1556,9 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface)
                        goto err_out;
                }
 
+               /* assign the mask for this irq */
+               irq_set_affinity_hint(entry->vector, &q_vector->affinity_mask);
+
                /* Enable q_vector */
                writel(FM10K_ITR_ENABLE, q_vector->itr);
 
@@ -1557,8 +1579,10 @@ err_out:
                if (!q_vector->tx.count && !q_vector->rx.count)
                        continue;
 
-               /* disable interrupts */
+               /* clear the affinity_mask in the IRQ descriptor */
+               irq_set_affinity_hint(entry->vector, NULL);
 
+               /* disable interrupts */
                writel(FM10K_ITR_MASK_SET, q_vector->itr);
 
                free_irq(entry->vector, q_vector);
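
A brief note on the pairing these hunks establish: irq_set_affinity_hint() stores a pointer to the supplied cpumask in the IRQ descriptor, where tools such as irqbalance can read it; clearing it to NULL before free_irq() keeps the descriptor from pointing at a q_vector that is about to be freed.
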
@@ -1857,17 +1881,18 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface)
                return;
        }
 
-       if (max_gts < expected_gts) {
-               dev_warn(&interface->pdev->dev,
-                        "This device requires %dGT/s of bandwidth for optimal performance.\n",
-                        expected_gts);
-               dev_warn(&interface->pdev->dev,
-                        "A %sslot with x%d lanes is suggested.\n",
-                        (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
-                         hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
-                         hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
-                        hw->bus_caps.width);
-       }
+       if (max_gts >= expected_gts)
+               return;
+
+       dev_warn(&interface->pdev->dev,
+                "This device requires %dGT/s of bandwidth for optimal performance.\n",
+                expected_gts);
+       dev_warn(&interface->pdev->dev,
+                "A %sslot with x%d lanes is suggested.\n",
+                (hw->bus_caps.speed == fm10k_bus_speed_2500 ? "2.5GT/s " :
+                 hw->bus_caps.speed == fm10k_bus_speed_5000 ? "5.0GT/s " :
+                 hw->bus_caps.speed == fm10k_bus_speed_8000 ? "8.0GT/s " : ""),
+                hw->bus_caps.width);
 }
 
 /**
@@ -1881,8 +1906,7 @@ static void fm10k_slot_warn(struct fm10k_intfc *interface)
  * The OS initialization, configuring of the interface private structure,
  * and a hardware reset occur.
  **/
-static int fm10k_probe(struct pci_dev *pdev,
-                      const struct pci_device_id *ent)
+static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct net_device *netdev;
        struct fm10k_intfc *interface;
@@ -2286,7 +2310,8 @@ static void fm10k_io_resume(struct pci_dev *pdev)
 
        err = fm10k_init_queueing_scheme(interface);
        if (err) {
-               dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err);
+               dev_err(&interface->pdev->dev,
+                       "init_queueing_scheme failed: %d\n", err);
                return;
        }
 
index 8b9b6ba5b92bfb76ff2efa95f2c07160af65611a..808307e677187c9ffb00eeb9060eb8880d100ec5 100644 (file)
@@ -266,7 +266,6 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
 {
        u8 perm_addr[ETH_ALEN];
        u32 serial_num;
-       int i;
 
        serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));
 
@@ -288,10 +287,8 @@ static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
        perm_addr[4] = (u8)(serial_num >> 8);
        perm_addr[5] = (u8)(serial_num);
 
-       for (i = 0; i < ETH_ALEN; i++) {
-               hw->mac.perm_addr[i] = perm_addr[i];
-               hw->mac.addr[i] = perm_addr[i];
-       }
+       ether_addr_copy(hw->mac.perm_addr, perm_addr);
+       ether_addr_copy(hw->mac.addr, perm_addr);
 
        return 0;
 }
@@ -332,7 +329,7 @@ static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
        /* clear set bit from VLAN ID */
        vid &= ~FM10K_VLAN_CLEAR;
 
-       /* if glort or vlan are not valid return error */
+       /* if glort or VLAN are not valid return error */
        if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
@@ -417,6 +414,7 @@ static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
 
        if (mode > FM10K_XCAST_MODE_NONE)
                return FM10K_ERR_PARAM;
+
        /* if glort is not valid return error */
        if (!fm10k_glort_valid_pf(hw, glort))
                return FM10K_ERR_PARAM;
@@ -924,7 +922,7 @@ err_out:
        txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
                  FM10K_TXQCTL_VF | vf_idx;
 
-       /* assign VID */
+       /* assign VLAN ID */
        for (i = 0; i < queues_per_pool; i++)
                fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
 
@@ -1175,14 +1173,14 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
 }
 
 /**
- * fm10k_iov_select_vid - Select correct default VID
+ * fm10k_iov_select_vid - Select correct default VLAN ID
  * @hw: Pointer to hardware structure
- * @vid: VID to correct
+ * @vid: VLAN ID to correct
  *
- * Will report an error if VID is out of range. For VID = 0, it will return
- * either the pf_vid or sw_vid depending on which one is set.
+ * Will report an error if the VLAN ID is out of range. For VID = 0, it will
+ * return either the pf_vid or sw_vid depending on which one is set.
  */
-static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
+static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
 {
        if (!vid)
                return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
@@ -1235,8 +1233,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                err = fm10k_iov_select_vid(vf_info, (u16)vid);
                if (err < 0)
                        return err;
-               else
-                       vid = err;
+
+               vid = err;
 
                /* update VSI info for VF in regards to VLAN table */
                err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
@@ -1261,8 +1259,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                err = fm10k_iov_select_vid(vf_info, vlan);
                if (err < 0)
                        return err;
-               else
-                       vlan = (u16)err;
+
+               vlan = (u16)err;
 
                /* notify switch of request for new unicast address */
                err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
@@ -1287,8 +1285,8 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
                err = fm10k_iov_select_vid(vf_info, vlan);
                if (err < 0)
                        return err;
-               else
-                       vlan = (u16)err;
+
+               vlan = (u16)err;
 
                /* notify switch of request for new multicast address */
                err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
@@ -1416,14 +1414,6 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
        return err;
 }
 
-const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
-       FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
-       FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
-       FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
-       FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
-       FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
-};
-
 /**
  *  fm10k_update_stats_hw_pf - Updates hardware related statistics of PF
  *  @hw: pointer to hardware structure
@@ -1451,9 +1441,10 @@ static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
                xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
                vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
                                                    &stats->vlan_drop);
-               loopback_drop = fm10k_read_hw_stats_32b(hw,
-                                                       FM10K_STATS_LOOPBACK_DROP,
-                                                       &stats->loopback_drop);
+               loopback_drop =
+                       fm10k_read_hw_stats_32b(hw,
+                                               FM10K_STATS_LOOPBACK_DROP,
+                                               &stats->loopback_drop);
                nodesc_drop = fm10k_read_hw_stats_32b(hw,
                                                      FM10K_STATS_NODESC_DROP,
                                                      &stats->nodesc_drop);
@@ -1698,8 +1689,8 @@ const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
  *
  *  This handler configures the default VLAN for the PF
  **/
-s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
-                            struct fm10k_mbx_info *mbx)
+static s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
+                                   struct fm10k_mbx_info *mbx)
 {
        u16 glort, pvid;
        u32 pvid_update;
@@ -1718,7 +1709,7 @@ s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
        if (!fm10k_glort_valid_pf(hw, glort))
                return FM10K_ERR_PARAM;
 
-       /* verify VID is valid */
+       /* verify VLAN ID is valid */
        if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
                return FM10K_ERR_PARAM;
 
index 40a0dbc62a04af1a8fc659b012b3fb0d8d4d0e27..a8fc512a2416432cdce32738efcbc35f3f7b4e64 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -107,8 +107,6 @@ extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
 #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
        FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_LPORT_MAP, \
                          fm10k_lport_map_msg_attr, func)
-s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *, u32 **,
-                            struct fm10k_mbx_info *);
 extern const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[];
 #define FM10K_PF_MSG_UPDATE_PVID_HANDLER(func) \
        FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_UPDATE_PVID, \
@@ -129,7 +127,6 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
                              struct fm10k_mbx_info *);
 s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *, u32 **,
                                 struct fm10k_mbx_info *);
-extern const struct fm10k_msg_data fm10k_iov_msg_data_pf[];
 
 extern struct fm10k_info fm10k_pf_info;
 #endif /* _FM10K_PF_H */
index 9b29d7b0377a4302aea542a2afd99460707241f1..95afb5c0c9c4c815b7043710ec0ff25c090b3db3 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -48,8 +48,8 @@ s32 fm10k_tlv_msg_init(u32 *msg, u16 msg_id)
  *  the attribute buffer.  It will return success if provided with valid
  *  pointers.
  **/
-s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
-                                  const unsigned char *string)
+static s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
+                                         const unsigned char *string)
 {
        u32 attr_data = 0, len = 0;
        u32 *attr;
@@ -98,7 +98,7 @@ s32 fm10k_tlv_attr_put_null_string(u32 *msg, u16 attr_id,
  *  it in the array pointed to by string.  It will return success if provided
  *  with valid pointers.
  **/
-s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
+static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
 {
        u32 len;
 
@@ -353,7 +353,7 @@ s32 fm10k_tlv_attr_get_le_struct(u32 *attr, void *le_struct, u32 len)
  *  function will return NULL on failure, and a pointer to the start
  *  of the nested attributes on success.
  **/
-u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
+static u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
 {
        u32 *attr;
 
@@ -370,7 +370,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
 }
 
 /**
- *  fm10k_tlv_attr_nest_start - Start a set of nested attributes
+ *  fm10k_tlv_attr_nest_stop - Stop a set of nested attributes
  *  @msg: Pointer to message block
  *
  *  This function closes off an existing set of nested attributes.  The
@@ -378,7 +378,7 @@ u32 *fm10k_tlv_attr_nest_start(u32 *msg, u16 attr_id)
  *  the case of a nest within the nest this would be the outer nest pointer.
  *  This function will return success provided all pointers are valid.
  **/
-s32 fm10k_tlv_attr_nest_stop(u32 *msg)
+static s32 fm10k_tlv_attr_nest_stop(u32 *msg)
 {
        u32 *attr;
        u32 len;
@@ -483,8 +483,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
  *  FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
  *  and 0 on success.
  **/
-s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
-                        const struct fm10k_tlv_attr *tlv_attr)
+static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
+                               const struct fm10k_tlv_attr *tlv_attr)
 {
        u32 i, attr_id, offset = 0;
        s32 err = 0;
index 7e045e8bf1ebaedfcc29a8350019d5cecbfdd9c7..d5ad359c1d54b6e4cff17a2eecee2ba9a3a69723 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -106,8 +106,6 @@ struct fm10k_msg_data {
 #define FM10K_MSG_HANDLER(id, attr, func) { id, attr, func }
 
 s32 fm10k_tlv_msg_init(u32 *, u16);
-s32 fm10k_tlv_attr_put_null_string(u32 *, u16, const unsigned char *);
-s32 fm10k_tlv_attr_get_null_string(u32 *, unsigned char *);
 s32 fm10k_tlv_attr_put_mac_vlan(u32 *, u16, const u8 *, u16);
 s32 fm10k_tlv_attr_get_mac_vlan(u32 *, u8 *, u16 *);
 s32 fm10k_tlv_attr_put_bool(u32 *, u16);
@@ -147,9 +145,6 @@ s32 fm10k_tlv_attr_get_value(u32 *, void *, u32);
                fm10k_tlv_attr_get_value(attr, ptr, sizeof(s64))
 s32 fm10k_tlv_attr_put_le_struct(u32 *, u16, const void *, u32);
 s32 fm10k_tlv_attr_get_le_struct(u32 *, void *, u32);
-u32 *fm10k_tlv_attr_nest_start(u32 *, u16);
-s32 fm10k_tlv_attr_nest_stop(u32 *);
-s32 fm10k_tlv_attr_parse(u32 *, u32 **, const struct fm10k_tlv_attr *);
 s32 fm10k_tlv_msg_parse(struct fm10k_hw *, u32 *, struct fm10k_mbx_info *,
                        const struct fm10k_msg_data *);
 s32 fm10k_tlv_msg_error(struct fm10k_hw *hw, u32 **results,
index 02727250ce1f5919ef0d0a87953e4222719018b6..098883d2875f6f4e3f34a8a424788e0a025b67dc 100644 (file)
@@ -354,7 +354,7 @@ struct fm10k_hw;
 #define FM10K_VLAN_TABLE_VID_MAX               4096
 #define FM10K_VLAN_TABLE_VSI_MAX               64
 #define FM10K_VLAN_LENGTH_SHIFT                        16
-#define FM10K_VLAN_CLEAR                       (1 << 15)
+#define FM10K_VLAN_CLEAR                       BIT(15)
 #define FM10K_VLAN_ALL \
        ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
 
@@ -388,13 +388,13 @@ struct fm10k_hw;
 #define FM10K_SW_SYSTIME_PULSE(_n)     ((_n) + 0x02252)
 
 enum fm10k_int_source {
-       fm10k_int_Mailbox       = 0,
-       fm10k_int_PCIeFault     = 1,
-       fm10k_int_SwitchUpDown  = 2,
-       fm10k_int_SwitchEvent   = 3,
-       fm10k_int_SRAM          = 4,
-       fm10k_int_VFLR          = 5,
-       fm10k_int_MaxHoldTime   = 6,
+       fm10k_int_mailbox               = 0,
+       fm10k_int_pcie_fault            = 1,
+       fm10k_int_switch_up_down        = 2,
+       fm10k_int_switch_event          = 3,
+       fm10k_int_sram                  = 4,
+       fm10k_int_vflr                  = 5,
+       fm10k_int_max_hold_time         = 6,
        fm10k_int_sources_max_pf
 };
 
index 2af697df5abc1b58a716c2cf618ca3582508ad51..5445c0fab49f2ad9073ba4bf507ebc9af6e299fd 100644 (file)
@@ -442,6 +442,7 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
 
        if (mode > FM10K_XCAST_MODE_NONE)
                return FM10K_ERR_PARAM;
+
        /* generate message requesting to change xcast mode */
        fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
        fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);
index bd6d9c002accacd9dba8a1628291688e3352e2a1..b7bc014ae00b7b9e89a36c74c357618178ef1e16 100644 (file)
@@ -776,6 +776,8 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
 int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
 struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                             bool is_vf, bool is_netdev);
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                         bool is_vf, bool is_netdev);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
index 61a49793594120b9cd4f006749af5f5ae0d962f8..b22012a446a6e127fc7ed83665e3d629f441e4f2 100644 (file)
@@ -227,6 +227,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_nvm_update                 = 0x0703,
        i40e_aqc_opc_nvm_config_read            = 0x0704,
        i40e_aqc_opc_nvm_config_write           = 0x0705,
+       i40e_aqc_opc_oem_post_update            = 0x0720,
 
        /* virtualization commands */
        i40e_aqc_opc_send_msg_to_pf             = 0x0801,
@@ -1891,6 +1892,26 @@ struct i40e_aqc_nvm_config_data_immediate_field {
 
 I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
 
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
+       u8 sel_data;
+       u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+       u8 str_len;
+       u8 dev_addr;
+       __le16 eeprom_addr;
+       u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
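
The I40E_CHECK_STRUCT_LEN lines above pin the 8- and 40-byte layouts at build time. The same invariants, restated as a self-contained sketch with C99 fixed-width types (the __le16 byte order is ignored here, since only sizes are checked):

#include <stdint.h>

struct oem_post_update {               /* mirrors i40e_aqc_nvm_oem_post_update */
        uint8_t sel_data;
        uint8_t reserved[7];
};

struct oem_post_update_buffer {        /* mirrors the _buffer struct above */
        uint8_t  str_len;
        uint8_t  dev_addr;
        uint16_t eeprom_addr;          /* __le16 in the driver */
        uint8_t  data[36];
};

_Static_assert(sizeof(struct oem_post_update) == 0x8, "AQ command is 8 bytes");
_Static_assert(sizeof(struct oem_post_update_buffer) == 0x28, "buffer is 40 bytes");
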
index 79ae7beeafe5afa17531083ba8e933fb15a43f9b..daa9204426d42b9cc74749800d62413bc0ddbf5a 100644 (file)
@@ -762,7 +762,7 @@ static void i40e_write_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = BIT(ce_info->width) - 1;
+       mask = (u8)(BIT(ce_info->width) - 1);
 
        src_byte = *from;
        src_byte &= mask;
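
BIT(ce_info->width) - 1 evaluates as unsigned long, so storing it in the driver's u8 mask narrows the value; the added cast makes that narrowing explicit, presumably to quiet truncation warnings. In isolation, with BIT() defined locally:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))          /* local stand-in */

int main(void)
{
        uint8_t width = 3;                        /* bit width of the field */
        uint8_t mask = (uint8_t)(BIT(width) - 1); /* 0x07, explicit narrowing */

        printf("mask = 0x%02x\n", mask);
        return 0;
}
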
index 2b1b655a3b525117b7242632f5776b44e66cdc57..b118deb08ce60f48f6a306d7c86bc7621c5786e8 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 7
+#define DRV_VERSION_BUILD 8
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -1258,6 +1258,42 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
                                        struct i40e_mac_filter, list);
 }
 
+/**
+ * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANs
+ * @vsi: the VSI to be searched
+ * @macaddr: the mac address to be removed
+ * @is_vf: true if it is a VF
+ * @is_netdev: true if it is a netdev
+ *
+ * Removes a given MAC address from a VSI, regardless of VLAN
+ *
+ * Returns 0 for success, or error
+ **/
+int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
+                         bool is_vf, bool is_netdev)
+{
+       struct i40e_mac_filter *f = NULL;
+       int changed = 0;
+
+       WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
+            "Missing mac_filter_list_lock\n");
+       list_for_each_entry(f, &vsi->mac_filter_list, list) {
+               if ((ether_addr_equal(macaddr, f->macaddr)) &&
+                   (is_vf == f->is_vf) &&
+                   (is_netdev == f->is_netdev)) {
+                       f->counter--;
+                       f->changed = true;
+                       changed = 1;
+               }
+       }
+       if (changed) {
+               vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+               vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+               return 0;
+       }
+       return -ENOENT;
+}
+
 /**
  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
  * @vsi: the PF Main VSI - inappropriate for any other VSI
@@ -1531,7 +1567,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
+                       if (enabled_tc & BIT(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
@@ -1560,7 +1596,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
-               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+               if (vsi->tc_config.enabled_tc & BIT(i)) {
                        /* TC is enabled */
                        int pow, num_qps;
 
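enabled_tc is an 8-bit TC bitmap, so the 32-bit-safe BIT() is sufficient and BIT_ULL() was overkill. A standalone version of the counting loop, with both macros defined locally as stand-ins:

#include <assert.h>
#include <stdint.h>

#define BIT(nr)     (1UL << (nr))
#define BIT_ULL(nr) (1ULL << (nr))     /* local stand-ins for linux/bitops.h */

int main(void)
{
        uint8_t enabled_tc = 0x05;     /* TCs 0 and 2 enabled */
        int i, numtc = 0;

        for (i = 0; i < 8; i++)        /* I40E_MAX_TRAFFIC_CLASS */
                if (enabled_tc & BIT(i))
                        numtc++;
        assert(numtc == 2);
        /* for an 8-bit map the two macros select the same bit */
        assert((enabled_tc & BIT(2)) == (enabled_tc & BIT_ULL(2)));
        return 0;
}
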
@@ -1880,11 +1916,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
        /* Now process 'del_list' outside the lock */
        if (!list_empty(&tmp_del_list)) {
+               int del_list_size;
+
                filter_list_len = pf->hw.aq.asq_buf_size /
                            sizeof(struct i40e_aqc_remove_macvlan_element_data);
-               del_list = kcalloc(filter_list_len,
-                           sizeof(struct i40e_aqc_remove_macvlan_element_data),
-                           GFP_KERNEL);
+               del_list_size = filter_list_len *
+                           sizeof(struct i40e_aqc_remove_macvlan_element_data);
+               del_list = kzalloc(del_list_size, GFP_KERNEL);
                if (!del_list) {
                        i40e_cleanup_add_list(&tmp_add_list);
 
@@ -1919,7 +1957,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                                                NULL);
                                aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
-                               memset(del_list, 0, sizeof(*del_list));
+                               memset(del_list, 0, del_list_size);
 
                                if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
                                        retval = -EIO;
@@ -1955,13 +1993,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        }
 
        if (!list_empty(&tmp_add_list)) {
+               int add_list_size;
 
                /* do all the adds now */
                filter_list_len = pf->hw.aq.asq_buf_size /
               sizeof(struct i40e_aqc_add_macvlan_element_data);
-               add_list = kcalloc(filter_list_len,
-                              sizeof(struct i40e_aqc_add_macvlan_element_data),
-                              GFP_KERNEL);
+               add_list_size = filter_list_len *
+                              sizeof(struct i40e_aqc_add_macvlan_element_data);
+               add_list = kzalloc(add_list_size, GFP_KERNEL);
                if (!add_list) {
                        /* Purge element from temporary lists */
                        i40e_cleanup_add_list(&tmp_add_list);
@@ -2000,7 +2039,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                                if (aq_ret)
                                        break;
-                               memset(add_list, 0, sizeof(*add_list));
+                               memset(add_list, 0, add_list_size);
                        }
                        /* Entries from tmp_add_list were cloned from MAC
                         * filter list, hence clean those cloned entries
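
Carrying del_list_size/add_list_size around also fixes the per-batch clear: memset(list, 0, sizeof(*list)) zeroed only the first element, leaving stale entries for the next firmware call. A small model of the difference:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct elem { unsigned char mac[6]; unsigned short flags; };

int main(void)
{
        int n = 4;
        struct elem *list = calloc(n, sizeof(*list));
        size_t list_size = n * sizeof(*list);

        memset(list, 0xff, list_size);          /* pretend it was filled */
        memset(list, 0, sizeof(*list));         /* old bug: clears 1 element */
        printf("stale byte after buggy clear: 0x%02x\n", list[1].mac[0]);
        memset(list, 0, list_size);             /* fixed: clears everything */
        free(list);
        return 0;
}
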
@@ -4433,7 +4472,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= BIT_ULL(tc);
+                       enabled_tc |= BIT(tc);
                        break;
                }
        }
@@ -4517,7 +4556,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        num_tc++;
        }
        return num_tc;
@@ -4539,7 +4578,7 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        break;
        }
 
@@ -4699,7 +4738,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
-               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
+               if (vsi->tc_config.enabled_tc & BIT(i))
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
@@ -4761,7 +4800,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        bw_share[i] = 1;
        }
 
@@ -4835,7 +4874,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & BIT_ULL(i))
+               if (enabled_tc & BIT(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
@@ -5232,7 +5271,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
-               enabled_tc |= BIT_ULL(i);
+               enabled_tc |= BIT(i);
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -6096,23 +6135,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
+               reset_flags |= BIT(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
+               reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
+               reset_flags |= BIT(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
 
@@ -6183,15 +6222,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        val = rd32(&pf->hw, pf->hw.aq.arq.len);
        oldval = val;
        if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
        }
        if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
        }
        if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
-               dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+               if (hw->debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
                val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
        }
        if (oldval != val)
@@ -6200,15 +6242,18 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        val = rd32(&pf->hw, pf->hw.aq.asq.len);
        oldval = val;
        if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
        }
        if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
-               dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+               if (pf->hw.debug_mask & I40E_DEBUG_AQ)
+                       dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
                val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
        }
        if (oldval != val)
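
A misbehaving VF or a wedged queue can assert these error bits on every service-task pass, so the prints are now gated on I40E_DEBUG_AQ. The pattern, reduced to a runnable stand-in (names here are placeholders, not driver symbols):

#include <stdio.h>

#define DEBUG_AQ 0x4                    /* stand-in for I40E_DEBUG_AQ */

static unsigned int debug_mask;         /* stand-in for hw->debug_mask */

static void aq_dbg(const char *msg)
{
        if (debug_mask & DEBUG_AQ)      /* silent unless AQ debugging is on */
                fprintf(stderr, "%s\n", msg);
}

int main(void)
{
        aq_dbg("ARQ VF Error detected");        /* suppressed */
        debug_mask |= DEBUG_AQ;
        aq_dbg("ARQ Overflow Error detected");  /* printed */
        return 0;
}
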
@@ -6259,6 +6304,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                        break;
                case i40e_aqc_opc_nvm_erase:
                case i40e_aqc_opc_nvm_update:
+               case i40e_aqc_opc_oem_post_update:
                        i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
                        break;
                default:
@@ -10567,7 +10613,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
+       if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
                pf->wol_en = false;
        else
                pf->wol_en = true;
index ae879826084b790a467f9eed01196f430daf47ae..3226946bf3d44076daacaf18fc6ad71158a57fca 100644 (file)
@@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
index b3bd81c3e1ceec5d704420adc966f966e10df98f..aa58a498c239e15e666d7b807a389d08b4dddb20 100644 (file)
@@ -549,12 +549,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
 
                spin_lock_bh(&vsi->mac_filter_list_lock);
-               f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
-                                   true, false);
-               if (!f)
-                       dev_info(&pf->pdev->dev,
-                                "Could not allocate VF MAC addr\n");
+               if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
+                       f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
+                                      vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                      true, false);
+                       if (!f)
+                               dev_info(&pf->pdev->dev,
+                                        "Could not add MAC filter %pM for VF %d\n",
+                                       vf->default_lan_addr.addr, vf->vf_id);
+               }
                f = i40e_add_filter(vsi, brdcast,
                                    vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
@@ -1683,8 +1686,12 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
-               i40e_del_filter(vsi, al->list[i].addr,
-                               I40E_VLAN_ANY, true, false);
+               if (i40e_del_mac_all_vlan(vsi, al->list[i].addr, true, false)) {
+                       ret = I40E_ERR_INVALID_MAC_ADDR;
+                       spin_unlock_bh(&vsi->mac_filter_list_lock);
+                       goto error_param;
+               }
+
        spin_unlock_bh(&vsi->mac_filter_list_lock);
 
        /* program the updated filter list */
index 1c76389bd8882699de5e5a8f7de070982535d5fc..f5b2b369dc7ce883820faae432e1f10947e29d14 100644 (file)
@@ -227,6 +227,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_nvm_update                 = 0x0703,
        i40e_aqc_opc_nvm_config_read            = 0x0704,
        i40e_aqc_opc_nvm_config_write           = 0x0705,
+       i40e_aqc_opc_oem_post_update            = 0x0720,
 
        /* virtualization commands */
        i40e_aqc_opc_send_msg_to_pf             = 0x0801,
@@ -1888,6 +1889,26 @@ struct i40e_aqc_nvm_config_data_immediate_field {
 
 I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
 
+/* OEM Post Update (indirect 0x0720)
+ * no command data struct used
+ */
+struct i40e_aqc_nvm_oem_post_update {
+#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
+       u8 sel_data;
+       u8 reserved[7];
+};
+
+I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update);
+
+struct i40e_aqc_nvm_oem_post_update_buffer {
+       u8 str_len;
+       u8 dev_addr;
+       __le16 eeprom_addr;
+       u8 data[36];
+};
+
+I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
+
 /* Send to PF command (indirect 0x0801) id is only used by PF
  * Send to VF command (indirect 0x0802) id is only used by PF
  * Send to Peer PF command (indirect 0x0803)
index 9f7b279b9d9c8f827dec988b25801f26f0386f7c..3b9d2037456cc5cbed60ab2ecd41569583712fe2 100644 (file)
@@ -153,6 +153,7 @@ struct i40e_virtchnl_vsi_resource {
 #define I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR     0x00000020
 #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING    0x00020000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
index b4c632f417f68a6a104ef98d1a07e6b38339f527..455394cf7f80c3199117587ca896510cb23bb622 100644 (file)
@@ -34,7 +34,15 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.4.3"
+#define DRV_KERN "-k"
+
+#define DRV_VERSION_MAJOR 1
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 4
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+            __stringify(DRV_VERSION_MINOR) "." \
+            __stringify(DRV_VERSION_BUILD) \
+            DRV_KERN
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2015 Intel Corporation.";
@@ -2034,6 +2042,9 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 {
        int i;
 
+       if (!adapter->tx_rings)
+               return;
+
        for (i = 0; i < adapter->num_active_queues; i++)
                if (adapter->tx_rings[i].desc)
                        i40evf_free_tx_resources(&adapter->tx_rings[i]);
@@ -2102,6 +2113,9 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 {
        int i;
 
+       if (!adapter->rx_rings)
+               return;
+
        for (i = 0; i < adapter->num_active_queues; i++)
                if (adapter->rx_rings[i].desc)
                        i40evf_free_rx_resources(&adapter->rx_rings[i]);
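
Both free routines can now be reached on teardown paths where the ring arrays were never allocated, so the NULL check turns an oops into a no-op. The guard, sketched with simplified types:

#include <stdlib.h>

struct ring { void *desc; };

/* Shape of the guarded free paths above: tolerate being called before
 * the ring array was ever allocated.
 */
void free_all_rings(struct ring *rings, int n)
{
        int i;

        if (!rings)             /* nothing was allocated; bail out safely */
                return;
        for (i = 0; i < n; i++) {
                free(rings[i].desc);
                rings[i].desc = NULL;
        }
}
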
index 3c9c008b168b752d4ab209dc3203edce7058b4ec..c1c5262837572fdfb00d62f3e322c2f6ac7d14f1 100644 (file)
@@ -157,7 +157,9 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
               I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
               I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
-              I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+              I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+
        adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
        if (PF_IS_V11(adapter))
index 7a73510e547cd49f38629b4d60fbab8b8dce945a..362911d024b5d8237070413cc722d65adab58451 100644 (file)
@@ -272,6 +272,11 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
                        if (ret_val)
                                goto out;
                }
+               if (phy->id == M88E1543_E_PHY_ID) {
+                       ret_val = igb_initialize_M88E1543_phy(hw);
+                       if (ret_val)
+                               goto out;
+               }
                break;
        case IGP03E1000_E_PHY_ID:
                phy->type = e1000_phy_igp_3;
@@ -294,6 +299,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
        case I210_I_PHY_ID:
                phy->type               = e1000_phy_i210;
                phy->ops.check_polarity = igb_check_polarity_m88;
+               phy->ops.get_cfg_done   = igb_get_cfg_done_i210;
                phy->ops.get_phy_info   = igb_get_phy_info_m88;
                phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
                phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
@@ -925,6 +931,8 @@ static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
 
        if (phy->id == M88E1512_E_PHY_ID)
                ret_val = igb_initialize_M88E1512_phy(hw);
+       if (phy->id == M88E1543_E_PHY_ID)
+               ret_val = igb_initialize_M88E1543_phy(hw);
 out:
        return ret_val;
 }
index b1915043bc0cfefbe416b6bf5ca59658a8f47278..a61ee9462dd48583129d839c79a9628d8480d780 100644 (file)
 #define E1000_M88E1543_PAGE_ADDR       0x16       /* Page Offset Register */
 #define E1000_M88E1543_EEE_CTRL_1      0x0
 #define E1000_M88E1543_EEE_CTRL_1_MS   0x0001     /* EEE Master/Slave */
+#define E1000_M88E1543_FIBER_CTRL      0x0
 #define E1000_EEE_ADV_DEV_I354         7
 #define E1000_EEE_ADV_ADDR_I354                60
 #define E1000_EEE_ADV_100_SUPPORTED    (1 << 1)   /* 100BaseTx EEE Supported */
index 65d931669f813bbcca0a21cc13a68c53663b03ee..29f59c76878a59a61e4e7d61a084b25db33e8726 100644 (file)
@@ -900,3 +900,30 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
        wr32(E1000_MDICNFG, mdicnfg);
        return ret_val;
 }
+
+/**
+ *  igb_get_cfg_done_i210 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so the error is *ONLY* logged and 0 is
+ *  returned.  If an error were returned, EEPROM-less silicon would never
+ *  be able to reset or change link.
+ **/
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
+{
+       s32 timeout = PHY_CFG_TIMEOUT;
+       u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+       while (timeout) {
+               if (rd32(E1000_EEMNGCTL_I210) & mask)
+                       break;
+               usleep_range(1000, 2000);
+               timeout--;
+       }
+       if (!timeout)
+               hw_dbg("MNG configuration cycle has not completed.\n");
+
+       return 0;
+}
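
Note the loop swallows the timeout on purpose: returning an error here would break EEPROM-less parts, per the comment above. Stripped to a runnable shape, with the register modeled as a plain variable:

#include <stdio.h>

static unsigned int mng_reg;    /* stand-in for the EEMNGCTL_I210 register */

static int poll_cfg_done(unsigned int mask, int timeout)
{
        while (timeout) {
                if (mng_reg & mask)
                        break;
                /* the driver sleeps here: usleep_range(1000, 2000) */
                timeout--;
        }
        if (!timeout)
                fprintf(stderr, "MNG configuration cycle has not completed.\n");

        return 0;       /* deliberately not an error: see comment above */
}

int main(void)
{
        mng_reg = 0x1;
        return poll_cfg_done(0x1, 100);
}
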
index 3442b6357d01211d9edd310f1b8339202ae5af19..eaa68a50cb3b7e7bda4db83c666725b224b80bf0 100644 (file)
@@ -34,6 +34,7 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
 s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+s32 igb_get_cfg_done_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
index 23ec28f43f6d3d354094655c7c696c5f7e3bfb1b..c0df40f2b29585fc52d2edc23ec10b4ed192e262 100644 (file)
@@ -2277,6 +2277,100 @@ out:
        return ret_val;
 }
 
+/**
+ *  igb_initialize_M88E1543_phy - Initialize M88E1543 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize Marvell 1543 to work correctly with Avoton.
+ **/
+s32 igb_initialize_M88E1543_phy(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = 0;
+
+       /* Switch to PHY page 0xFF. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xDC0C);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0xFB. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x0C0D);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 0x12. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+       if (ret_val)
+               goto out;
+
+       /* Change mode to SGMII-to-Copper */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+       if (ret_val)
+               goto out;
+
+       /* Switch to PHY page 1. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x1);
+       if (ret_val)
+               goto out;
+
+       /* Change mode to 1000BASE-X/SGMII and enable autoneg */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_FIBER_CTRL, 0x9140);
+       if (ret_val)
+               goto out;
+
+       /* Return the PHY to page 0. */
+       ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+       if (ret_val)
+               goto out;
+
+       ret_val = igb_phy_sw_reset(hw);
+       if (ret_val) {
+               hw_dbg("Error committing the PHY changes\n");
+               return ret_val;
+       }
+
+       /* msec_delay(1000); */
+       usleep_range(1000, 2000);
+out:
+       return ret_val;
+}
+
 /**
  * igb_power_up_phy_copper - Restore copper link in case of PHY power down
  * @hw: pointer to the HW structure
index 24d55edbb0e3a8b58290f94e2c34f3708d8c0111..aa1ae61a61d86378273d21f4b072df8e210652c8 100644 (file)
@@ -62,6 +62,7 @@ void igb_power_up_phy_copper(struct e1000_hw *hw);
 void igb_power_down_phy_copper(struct e1000_hw *hw);
 s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
 s32  igb_initialize_M88E1512_phy(struct e1000_hw *hw);
+s32  igb_initialize_M88E1543_phy(struct e1000_hw *hw);
 s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
 s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
 s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
index 4af2870e49f88aaa67559e6d454328eb1ff1d6db..21d9d02885cb0f0083d41a004032e3f94ac81304 100644 (file)
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEMNGCTL_I210 0x12030  /* MNG EEprom Control */
 #define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
@@ -385,8 +386,7 @@ do { \
 #define array_wr32(reg, offset, value) \
        wr32((reg) + ((offset) << 2), (value))
 
-#define array_rd32(reg, offset) \
-       (readl(hw->hw_addr + reg + ((offset) << 2)))
+#define array_rd32(reg, offset) (igb_rd32(hw, (reg) + ((offset) << 2)))
 
 /* DMA Coalescing registers */
 #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
index 1a2f1cc44b2836499dc231be9edb896b8b6ddbe6..e3cb93bdb21aed5341a758694735283058f8b71f 100644 (file)
@@ -389,6 +389,8 @@ struct igb_adapter {
        u16 link_speed;
        u16 link_duplex;
 
+       u8 __iomem *io_addr; /* Mainly for iounmap use */
+
        struct work_struct reset_task;
        struct work_struct watchdog_task;
        bool fc_autoneg;
index ea7b098872456e5903bbfd5dead770eba52f69ef..7afde455326d741ad5959741519b9e22b7fac6d2 100644 (file)
@@ -946,7 +946,6 @@ static void igb_configure_msix(struct igb_adapter *adapter)
 static int igb_request_msix(struct igb_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct e1000_hw *hw = &adapter->hw;
        int i, err = 0, vector = 0, free_vector = 0;
 
        err = request_irq(adapter->msix_entries[vector].vector,
@@ -959,7 +958,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
 
                vector++;
 
-               q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+               q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);
 
                if (q_vector->rx.ring && q_vector->tx.ring)
                        sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
@@ -1230,7 +1229,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
        q_vector->tx.work_limit = adapter->tx_work_limit;
 
        /* initialize ITR configuration */
-       q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+       q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
        q_vector->itr_val = IGB_START_ITR;
 
        /* initialize pointer to rings */
@@ -2294,9 +2293,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
 
        err = -EIO;
-       hw->hw_addr = pci_iomap(pdev, 0, 0);
-       if (!hw->hw_addr)
+       adapter->io_addr = pci_iomap(pdev, 0, 0);
+       if (!adapter->io_addr)
                goto err_ioremap;
+       /* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
+       hw->hw_addr = adapter->io_addr;
 
        netdev->netdev_ops = &igb_netdev_ops;
        igb_set_ethtool_ops(netdev);
@@ -2656,7 +2657,7 @@ err_sw_init:
 #ifdef CONFIG_PCI_IOV
        igb_disable_sriov(pdev);
 #endif
-       pci_iounmap(pdev, hw->hw_addr);
+       pci_iounmap(pdev, adapter->io_addr);
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
@@ -2823,7 +2824,7 @@ static void igb_remove(struct pci_dev *pdev)
 
        igb_clear_interrupt_scheme(adapter);
 
-       pci_iounmap(pdev, hw->hw_addr);
+       pci_iounmap(pdev, adapter->io_addr);
        if (hw->flash_address)
                iounmap(hw->flash_address);
        pci_release_selected_regions(pdev,
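
Unmapping adapter->io_addr rather than hw->hw_addr matters because hw_addr is a working pointer the rest of the driver may overwrite. The ownership split, with simplified stand-in types:

/* Keep the pointer returned by pci_iomap() in a field nothing else
 * writes, and always unmap that one; hw_addr is only a working copy.
 */
struct hw      { void *hw_addr; };      /* may be rewritten at runtime */
struct adapter { struct hw hw; void *io_addr; };

void attach_bar(struct adapter *a, void *bar0)
{
        a->io_addr = bar0;      /* canonical mapping, used only for unmap */
        a->hw.hw_addr = bar0;   /* working copy handed to register accessors */
}
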
@@ -2856,6 +2857,13 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
        if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
                return;
 
+       /* Of the below we really only want the effect of getting
+        * IGB_FLAG_HAS_MSIX set (if available), without which
+        * igb_enable_sriov() has no effect.
+        */
+       igb_set_interrupt_capability(adapter, true);
+       igb_reset_interrupt_capability(adapter);
+
        pci_sriov_set_totalvfs(pdev, 7);
        igb_enable_sriov(pdev, max_vfs);
 
index 445b4c9169b61d642360b9b17b0f237ceb21f3dd..f4c9a42dafcfb018d3b9b34a9d8bd72d09c5fc09 100644 (file)
@@ -664,6 +664,7 @@ struct ixgbe_adapter {
 #ifdef CONFIG_IXGBE_VXLAN
 #define IXGBE_FLAG2_VXLAN_REREG_NEEDED         BIT(12)
 #endif
+#define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
 
        /* Tx fast path data */
        int num_tx_queues;
@@ -897,6 +898,7 @@ int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 queue);
 int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
                         const u8 *addr, u16 queue);
+void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
                                  struct ixgbe_ring *);
index 8f09d291a0430a0260cda90da03a300d5ffd7116..d8a9fb8a59e20203c6e2f8910986dd1487ff493a 100644 (file)
@@ -880,11 +880,12 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
  *  @vlan: VLAN id to write to VLAN filter
  *  @vind: VMDq output index that maps queue to VLAN id in VFTA
  *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *  @vlvf_bypass: boolean flag - unused
  *
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
 static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                               bool vlan_on)
+                               bool vlan_on, bool vlvf_bypass)
 {
        u32 regindex;
        u32 bitindex;
index b8bd72589f729452a64ec739f50f6a7c5c2952d9..fa8d4f40ac2af1835605ff5fc3748ccd89311b19 100644 (file)
@@ -1083,12 +1083,16 @@ mac_reset_top:
 
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (is_valid_ether_addr(hw->mac.san_addr)) {
-               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
                /* Save the SAN MAC RAR index */
                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
 
+               hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* clear VMDq pool/queue selection for this RAR */
+               hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+                                      IXGBE_CLEAR_VMDQ_ALL);
+
                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
        }
index daec6aef5dc8bffe3514f136c876559768c816c1..64045053e874b8a7b1eb27333ef8a2bd4835700c 100644 (file)
@@ -1884,10 +1884,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
                hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
 
                hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
-
-               /*  clear VMDq pool/queue selection for RAR 0 */
-               hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
        }
+
+       /*  clear VMDq pool/queue selection for RAR 0 */
+       hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+
        hw->addr_ctrl.overflow_promisc = 0;
 
        hw->addr_ctrl.rar_used_count = 1;
@@ -2999,43 +3000,44 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
  *  return the VLVF index where this VLAN id should be placed
  *
  **/
-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
 {
-       u32 bits = 0;
-       u32 first_empty_slot = 0;
-       s32 regindex;
+       s32 regindex, first_empty_slot;
+       u32 bits;
 
        /* short cut the special case */
        if (vlan == 0)
                return 0;
 
-       /*
-         * Search for the vlan id in the VLVF entries. Save off the first empty
-         * slot found along the way
-         */
-       for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+       /* if vlvf_bypass is set we don't want to use an empty slot, we
+        * will simply bypass the VLVF if there are no entries present in the
+        * VLVF that contain our VLAN
+        */
+       first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
+
+       /* add VLAN enable bit for comparison */
+       vlan |= IXGBE_VLVF_VIEN;
+
+       /* Search for the vlan id in the VLVF entries. Save off the first empty
+        * slot found along the way.
+        *
+        * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
+        */
+       for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
                bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-               if (!bits && !(first_empty_slot))
+               if (bits == vlan)
+                       return regindex;
+               if (!first_empty_slot && !bits)
                        first_empty_slot = regindex;
-               else if ((bits & 0x0FFF) == vlan)
-                       break;
        }
 
-       /*
-         * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
-         * in the VLVF. Else use the first empty VLVF register for this
-         * vlan id.
-         */
-       if (regindex >= IXGBE_VLVF_ENTRIES) {
-               if (first_empty_slot)
-                       regindex = first_empty_slot;
-               else {
-                       hw_dbg(hw, "No space in VLVF.\n");
-                       regindex = IXGBE_ERR_NO_SPACE;
-               }
-       }
+       /* If we are here then we didn't find the VLAN.  Return first empty
+        * slot we found during our search, else error.
+        */
+       if (!first_empty_slot)
+               hw_dbg(hw, "No space in VLVF.\n");
 
-       return regindex;
+       return first_empty_slot ? : IXGBE_ERR_NO_SPACE;
 }
 
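The new search runs a pre-decrement loop, so it visits entries IXGBE_VLVF_ENTRIES - 1 down to 1 and never touches entry 0, which the early return reserves for VLAN 0. The idiom in isolation, with the entry count as a local stand-in:

#include <assert.h>

#define ENTRIES 64      /* stand-in for IXGBE_VLVF_ENTRIES */

int main(void)
{
        int visited = 0;

        /* for (regindex = ENTRIES; --regindex;) runs regindex = 63 .. 1;
         * the test happens after the decrement, so 0 terminates the loop
         * without being visited.
         */
        for (int regindex = ENTRIES; --regindex;)
                visited++;
        assert(visited == ENTRIES - 1);
        return 0;
}
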
 /**
@@ -3044,21 +3046,17 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
  *  @vlan: VLAN id to write to VLAN filter
  *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
  *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *  @vlvf_bypass: boolean flag indicating updating default pool is okay
  *
  *  Turn on/off specified VLAN in the VLAN filter table.
  **/
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
-                          bool vlan_on)
+                          bool vlan_on, bool vlvf_bypass)
 {
-       s32 regindex;
-       u32 bitindex;
-       u32 vfta;
-       u32 bits;
-       u32 vt;
-       u32 targetbit;
-       bool vfta_changed = false;
+       u32 regidx, vfta_delta, vfta, bits;
+       s32 vlvf_index;
 
-       if (vlan > 4095)
+       if ((vlan > 4095) || (vind > 63))
                return IXGBE_ERR_PARAM;
 
        /*
@@ -3073,22 +3071,16 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
         *    bits[11-5]: which register
         *    bits[4-0]:  which bit in the register
         */
-       regindex = (vlan >> 5) & 0x7F;
-       bitindex = vlan & 0x1F;
-       targetbit = (1 << bitindex);
-       vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
-
-       if (vlan_on) {
-               if (!(vfta & targetbit)) {
-                       vfta |= targetbit;
-                       vfta_changed = true;
-               }
-       } else {
-               if ((vfta & targetbit)) {
-                       vfta &= ~targetbit;
-                       vfta_changed = true;
-               }
-       }
+       regidx = vlan / 32;
+       vfta_delta = 1 << (vlan % 32);
+       vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
+
+       /* vfta_delta represents the difference between the current value
+        * of vfta and the value we want in the register.  Since the diff
+        * is an XOR mask we can just update vfta using an XOR.
+        */
+       vfta_delta &= vlan_on ? ~vfta : vfta;
+       vfta ^= vfta_delta;
 
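vfta_delta ends up non-zero only when the stored bit differs from the requested state, and the XOR then applies exactly that difference, replacing the old set/clear branches. Worked through both directions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t vfta = 0x00000010;     /* bit 4 already set */
        uint32_t vfta_delta = 1u << 4;
        int vlan_on = 1;

        /* requesting a bit that is already set: delta collapses to 0 */
        vfta_delta &= vlan_on ? ~vfta : vfta;
        vfta ^= vfta_delta;
        assert(vfta == 0x00000010 && vfta_delta == 0);

        /* clearing that same bit: delta survives and the XOR removes it */
        vfta_delta = 1u << 4;
        vlan_on = 0;
        vfta_delta &= vlan_on ? ~vfta : vfta;
        vfta ^= vfta_delta;
        assert(vfta == 0);
        return 0;
}
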
        /* Part 2
         * If VT Mode is set
@@ -3098,85 +3090,67 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
         *   Or !vlan_on
         *     clear the pool bit and possibly the vind
         */
-       vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
-       if (vt & IXGBE_VT_CTL_VT_ENABLE) {
-               s32 vlvf_index;
-
-               vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
-               if (vlvf_index < 0)
-                       return vlvf_index;
-
-               if (vlan_on) {
-                       /* set the pool bit */
-                       if (vind < 32) {
-                               bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
-                               bits |= (1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2),
-                                               bits);
-                       } else {
-                               bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
-                               bits |= (1 << (vind-32));
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1),
-                                               bits);
-                       }
-               } else {
-                       /* clear the pool bit */
-                       if (vind < 32) {
-                               bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
-                               bits &= ~(1 << vind);
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2),
-                                               bits);
-                               bits |= IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
-                       } else {
-                               bits = IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1));
-                               bits &= ~(1 << (vind-32));
-                               IXGBE_WRITE_REG(hw,
-                                               IXGBE_VLVFB((vlvf_index*2)+1),
-                                               bits);
-                               bits |= IXGBE_READ_REG(hw,
-                                               IXGBE_VLVFB(vlvf_index*2));
-                       }
-               }
+       if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
+               goto vfta_update;
+
+       vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
+       if (vlvf_index < 0) {
+               if (vlvf_bypass)
+                       goto vfta_update;
+               return vlvf_index;
+       }
 
-               /*
-                * If there are still bits set in the VLVFB registers
-                * for the VLAN ID indicated we need to see if the
-                * caller is requesting that we clear the VFTA entry bit.
-                * If the caller has requested that we clear the VFTA
-                * entry bit but there are still pools/VFs using this VLAN
-                * ID entry then ignore the request.  We're not worried
-                * about the case where we're turning the VFTA VLAN ID
-                * entry bit on, only when requested to turn it off as
-                * there may be multiple pools and/or VFs using the
-                * VLAN ID entry.  In that case we cannot clear the
-                * VFTA bit until all pools/VFs using that VLAN ID have also
-                * been cleared.  This will be indicated by "bits" being
-                * zero.
+       bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
+
+       /* set the pool bit */
+       bits |= 1 << (vind % 32);
+       if (vlan_on)
+               goto vlvf_update;
+
+       /* clear the pool bit */
+       bits ^= 1 << (vind % 32);
+
+       if (!bits &&
+           !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
+               /* Clear VFTA first, then disable VLVF.  Otherwise
+                * we run the risk of stray packets leaking into
+                * the PF via the default pool
                 */
-               if (bits) {
-                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
-                                       (IXGBE_VLVF_VIEN | vlan));
-                       if (!vlan_on) {
-                               /* someone wants to clear the vfta entry
-                                * but some pools/VFs are still using it.
-                                * Ignore it. */
-                               vfta_changed = false;
-                       }
-               } else {
-                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
-               }
+               if (vfta_delta)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
+
+               /* disable VLVF and clear remaining bit from pool */
+               IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
+
+               return 0;
        }
 
-       if (vfta_changed)
-               IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+       /* If there are still bits set in the VLVFB registers
+        * for the VLAN ID indicated we need to see if the
+        * caller is requesting that we clear the VFTA entry bit.
+        * If the caller has requested that we clear the VFTA
+        * entry bit but there are still pools/VFs using this VLAN
+        * ID entry then ignore the request.  We're not worried
+        * about the case where we're turning the VFTA VLAN ID
+        * entry bit on, only when requested to turn it off as
+        * there may be multiple pools and/or VFs using the
+        * VLAN ID entry.  In that case we cannot clear the
+        * VFTA bit until all pools/VFs using that VLAN ID have also
+        * been cleared.  This will be indicated by "bits" being
+        * zero.
+        */
+       vfta_delta = 0;
+
+vlvf_update:
+       /* record pool change and enable VLAN ID if not already enabled */
+       IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
+       IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
+
+vfta_update:
+       /* Update VFTA now that we are ready for traffic */
+       if (vfta_delta)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
 
        return 0;
 }
@@ -3196,8 +3170,8 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
 
        for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
                IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
-               IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
        }
 
        return 0;
index a0044e4a8b90c92c49c08598423743fec070850c..2b9563137fd88f932053a9a867754fd2328d31c1 100644 (file)
@@ -92,7 +92,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
-                          u32 vind, bool vlan_on);
+                          u32 vind, bool vlan_on, bool vlvf_bypass);
 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
                                 ixgbe_link_speed *speed,
index d681273bd39d52ee060c14b1c6c17d7f5e427dfb..1ed4c9add00db94f5586396f8e7aa29d9861db11 100644 (file)
@@ -166,8 +166,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
        /* set the supported link speeds */
        if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
                ecmd->supported |= SUPPORTED_10000baseT_Full;
-       if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
-               ecmd->supported |= SUPPORTED_2500baseX_Full;
        if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
                ecmd->supported |= SUPPORTED_1000baseT_Full;
        if (supported_link & IXGBE_LINK_SPEED_100_FULL)
@@ -179,8 +177,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
                        ecmd->advertising |= ADVERTISED_10000baseT_Full;
-               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
-                       ecmd->advertising |= ADVERTISED_2500baseX_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
        } else {
index ebd4522e7879dc44290ca3a82233a030c33f4894..66c64a3767198d78f49efe2b52bf03a99de0dc04 100644 (file)
@@ -3702,6 +3702,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
        /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
        hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
 
+       /* clear VLAN promisc flag so VFTA will be updated if necessary */
+       adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
        /*
         * Set up VF register offsets for selected VT Mode,
         * i.e. 32 or 64 VFs for SR-IOV
@@ -3899,12 +3902,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* add VID to filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
+       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
        set_bit(vid, adapter->active_vlans);
 
        return 0;
 }
 
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+       u32 vlvf;
+       int idx;
+
+       /* short cut the special case */
+       if (vlan == 0)
+               return 0;
+
+       /* Search for the vlan id in the VLVF entries */
+       for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
+               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
+               if ((vlvf & VLAN_VID_MASK) == vlan)
+                       break;
+       }
+
+       return idx;
+}
+
+void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 bits, word;
+       int idx;
+
+       idx = ixgbe_find_vlvf_entry(hw, vid);
+       if (!idx)
+               return;
+
+       /* See if any other pools are set for this VLAN filter
+        * entry other than the PF.
+        */
+       word = idx * 2 + (VMDQ_P(0) / 32);
+       bits = ~(1 << (VMDQ_P(0) % 32));
+       bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+
+       /* Disable the filter so this falls into the default pool. */
+       if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
+               if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+                       IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
+       }
+}
+
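
Each VLVF slot owns two adjacent VLVFB words (pools 0-31 and 32-63): word = idx * 2 + pool / 32 selects the word for a given pool and word ^ 1 its sibling, so the pair covers all 64 pool bits. The arithmetic, checked exhaustively for one hypothetical slot:

#include <assert.h>

int main(void)
{
        int idx = 5;                    /* hypothetical VLVF slot */

        for (int pool = 0; pool < 64; pool++) {
                int word = idx * 2 + pool / 32;

                assert(word == 10 || word == 11);   /* the slot's two words */
                assert((word ^ 1) != word);         /* partner word */
                assert((word ^ 1) / 2 == idx);      /* still inside the slot */
        }
        return 0;
}
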
 static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
                                  __be16 proto, u16 vid)
 {
@@ -3912,7 +3959,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
 
        /* remove VID from filter table */
-       hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
+       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+               ixgbe_update_pf_promisc_vlvf(adapter, vid);
+       else
+               hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
+
        clear_bit(vid, adapter->active_vlans);
 
        return 0;
@@ -3990,6 +4041,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
        }
 }
 
+static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vlnctrl, i;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       default:
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+               /* fall through */
+       case ixgbe_mac_82598EB:
+               /* legacy case, we can just disable VLAN filtering */
+               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+               vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+               return;
+       }
+
+       /* We are already in VLAN promisc, nothing to do */
+       if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+               return;
+
+       /* Set flag so we don't redo unnecessary work */
+       adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+
+       /* Add PF to all active pools */
+       for (i = IXGBE_VLVF_ENTRIES; --i;) {
+               u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
+               u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
+
+               vlvfb |= 1 << (VMDQ_P(0) % 32);
+               IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
+       }
+
+       /* Set all bits in the VLAN filter table array */
+       for (i = hw->mac.vft_size; i--;)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
+}
+
+#define VFTA_BLOCK_SIZE 8
+static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
+       u32 vid_start = vfta_offset * 32;
+       u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
+       u32 i, vid, word, bits;
+
+       for (i = IXGBE_VLVF_ENTRIES; --i;) {
+               u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
+
+               /* pull VLAN ID from VLVF */
+               vid = vlvf & VLAN_VID_MASK;
+
+               /* only concern ourselves with a certain range */
+               if (vid < vid_start || vid >= vid_end)
+                       continue;
+
+               if (vlvf) {
+                       /* record VLAN ID in VFTA */
+                       vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+                       /* if PF is part of this then continue */
+                       if (test_bit(vid, adapter->active_vlans))
+                               continue;
+               }
+
+               /* remove PF from the pool */
+               word = i * 2 + VMDQ_P(0) / 32;
+               bits = ~(1 << (VMDQ_P(0) % 32));
+               bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
+       }
+
+       /* extract values from active_vlans and write back to VFTA */
+       for (i = VFTA_BLOCK_SIZE; i--;) {
+               vid = (vfta_offset + i) * 32;
+               word = vid / BITS_PER_LONG;
+               bits = vid % BITS_PER_LONG;
+
+               vfta[i] |= adapter->active_vlans[word] >> bits;
+
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
+       }
+}
+
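
active_vlans is a bitmap of unsigned longs, so on a 64-bit kernel each long supplies two 32-bit VFTA words and the word/bits arithmetic splits it in half. A check of that mapping (the sketch assumes a 64-bit long):

#include <assert.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

int main(void)
{
        unsigned long active_vlans[1] = { 0xdeadbeefcafef00dUL };
        unsigned int i, vid, word, bits, vfta;

        if (BITS_PER_LONG != 64)        /* sketch targets 64-bit builds */
                return 0;

        for (i = 0; i < 2; i++) {
                vid  = i * 32;                  /* first VLAN ID of VFTA word i */
                word = vid / BITS_PER_LONG;     /* which long in the bitmap */
                bits = vid % BITS_PER_LONG;     /* offset of the 32-bit half */
                vfta = (unsigned int)(active_vlans[word] >> bits);
                assert(vfta == (i ? 0xdeadbeef : 0xcafef00d));
        }
        return 0;
}
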
+static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 vlnctrl, i;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+       default:
+               if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+                       break;
+               /* fall through */
+       case ixgbe_mac_82598EB:
+               vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+               vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+               vlnctrl |= IXGBE_VLNCTRL_VFE;
+               IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+               return;
+       }
+
+       /* We are not in VLAN promisc, nothing to do */
+       if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+               return;
+
+       /* Set flag so we don't redo unnecessary work */
+       adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
+       for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
+               ixgbe_scrub_vfta(adapter, i);
+}
+
 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
 {
        u16 vid;
@@ -4246,12 +4420,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
-       u32 vlnctrl;
        int count;
 
        /* Check for Promiscuous and All Multicast modes */
        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
-       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 
        /* set all bits that we expect to always be set */
        fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -4261,25 +4433,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 
        /* clear the bits we are changing the status of */
        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-       vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = true;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vmolr |= IXGBE_VMOLR_MPE;
-               /* Only disable hardware filter vlans in promiscuous mode
-                * if SR-IOV and VMDQ are disabled - otherwise ensure
-                * that hardware VLAN filters remain enabled.
-                */
-               if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-                                     IXGBE_FLAG_SRIOV_ENABLED))
-                       vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+               ixgbe_vlan_promisc_enable(adapter);
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        vmolr |= IXGBE_VMOLR_MPE;
                }
-               vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = false;
+               ixgbe_vlan_promisc_disable(adapter);
        }
 
        /*
@@ -4323,7 +4488,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
                /* NOTE:  VLAN filtering is disabled by setting PROMISC */
        }
 
-       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
 
        if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -5381,6 +5545,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
                                     hw->mac.num_rar_entries,
                                     GFP_ATOMIC);
+       if (!adapter->mac_table)
+               return -ENOMEM;
 
        /* Set MAC specific capability flags and exceptions */
        switch (hw->mac.type) {
index 31de6cf7adb010c3e98ba72a5e8589756a86f90e..eeff3d075bf81585dcc944f586a719dca740f269 100644 (file)
@@ -452,11 +452,34 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
 static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
                             u32 vf)
 {
-       /* VLAN 0 is a special case, don't allow it to be removed */
-       if (!vid && !add)
-               return 0;
+       struct ixgbe_hw *hw = &adapter->hw;
+       int err;
+
+       /* If the VLAN overlaps with one the PF is currently monitoring,
+        * make sure that we are able to allocate a VLVF entry.  This may
+        * be redundant but it guarantees the PF will maintain visibility
+        * of the VLAN.
+        */
+       if (add && test_bit(vid, adapter->active_vlans)) {
+               err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
+               if (err)
+                       return err;
+       }
 
-       return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+       err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false);
+
+       if (add && !err)
+               return err;
+
+       /* If we failed to add the VF VLAN or we are removing the VF VLAN
+        * we may need to drop the PF pool bit in order to allow us to free
+        * up the VLVF resources.
+        */
+       if (test_bit(vid, adapter->active_vlans) ||
+           (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+               ixgbe_update_pf_promisc_vlvf(adapter, vid);
+
+       return err;
 }
 
 static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -562,13 +585,75 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
 
        IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
 }
+
+static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 i;
+
+       /* post-decrement loop, covers VLVF_ENTRIES - 1 to 0 */
+       for (i = IXGBE_VLVF_ENTRIES; i--;) {
+               u32 word = i * 2 + vf / 32;
+               u32 bits[2], vlvfb, vid, vfta, vlvf;
+               u32 mask = 1 << (vf % 32);
+
+               vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+
+               /* if our bit isn't set we can skip it */
+               if (!(vlvfb & mask))
+                       continue;
+
+               /* clear our bit from vlvfb */
+               vlvfb ^= mask;
+
+               /* create 64b mask to check to see if we should clear VLVF */
+               bits[word % 2] = vlvfb;
+               bits[(word % 2) ^ 1] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
+
+               /* if promisc is enabled, PF will be present, leave VFTA */
+               if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
+                       bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
+
+                       if (bits[0] || bits[1])
+                               goto update_vlvfb;
+                       goto update_vlvf;
+               }
+
+               /* if other pools are present, just remove ourselves */
+               if (bits[0] || bits[1])
+                       goto update_vlvfb;
+
+               /* if we cannot determine VLAN just remove ourselves */
+               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
+               if (!vlvf)
+                       goto update_vlvfb;
+
+               vid = vlvf & VLAN_VID_MASK;
+               mask = 1 << (vid % 32);
+
+               /* clear bit from VFTA */
+               vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
+               if (vfta & mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask);
+update_vlvf:
+               /* clear POOL selection enable */
+               IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+update_vlvfb:
+               /* clear pool bits */
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
+       }
+}
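
The two-word handling above relies on each VLVF entry owning an even/odd pair
of VLVFB words: the sibling of a word is simply word ^ 1, and word % 2 says
which half of the 64-bit pool mask it holds. A quick standalone check of that
identity:

    #include <stdio.h>

    int main(void)
    {
            unsigned vf = 37, i = 3;
            unsigned word = i * 2 + vf / 32;

            printf("word=%u sibling=%u half=%u\n",
                   word, word ^ 1, word % 2);  /* word=7 sibling=6 half=1 */
            return 0;
    }
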
+
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
        u8 num_tcs = netdev_get_num_tc(adapter->netdev);
 
-       /* add PF assigned VLAN or VLAN 0 */
+       /* remove VLAN filters belonging to this VF */
+       ixgbe_clear_vf_vlans(adapter, vf);
+
+       /* add back PF assigned VLAN or VLAN 0 */
        ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
 
        /* reset offloads to defaults */
@@ -814,40 +899,14 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
        return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
 }
 
-static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
-{
-       u32 vlvf;
-       s32 regindex;
-
-       /* short cut the special case */
-       if (vlan == 0)
-               return 0;
-
-       /* Search for the vlan id in the VLVF entries */
-       for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
-               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
-               if ((vlvf & VLAN_VID_MASK) == vlan)
-                       break;
-       }
-
-       /* Return a negative value if not found */
-       if (regindex >= IXGBE_VLVF_ENTRIES)
-               regindex = -1;
-
-       return regindex;
-}
-
 static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                                 u32 *msgbuf, u32 vf)
 {
+       u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+       u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+       u8 tcs = netdev_get_num_tc(adapter->netdev);
        struct ixgbe_hw *hw = &adapter->hw;
-       int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
-       int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
        int err;
-       s32 reg_ndx;
-       u32 vlvf;
-       u32 bits;
-       u8 tcs = netdev_get_num_tc(adapter->netdev);
 
        if (adapter->vfinfo[vf].pf_vlan || tcs) {
                e_warn(drv,
@@ -857,54 +916,23 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                return -1;
        }
 
-       if (add)
-               adapter->vfinfo[vf].vlan_count++;
-       else if (adapter->vfinfo[vf].vlan_count)
-               adapter->vfinfo[vf].vlan_count--;
-
-       /* in case of promiscuous mode any VLAN filter set for a VF must
-        * also have the PF pool added to it.
-        */
-       if (add && adapter->netdev->flags & IFF_PROMISC)
-               err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+       /* VLAN 0 is a special case, don't allow it to be removed */
+       if (!vid && !add)
+               return 0;
 
        err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
-       if (!err && adapter->vfinfo[vf].spoofchk_enabled)
-               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+       if (err)
+               return err;
 
-       /* Go through all the checks to see if the VLAN filter should
-        * be wiped completely.
-        */
-       if (!add && adapter->netdev->flags & IFF_PROMISC) {
-               reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
-               if (reg_ndx < 0)
-                       return err;
-               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
-               /* See if any other pools are set for this VLAN filter
-                * entry other than the PF.
-                */
-               if (VMDQ_P(0) < 32) {
-                       bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
-                       bits &= ~(1 << VMDQ_P(0));
-                       bits |= IXGBE_READ_REG(hw,
-                                              IXGBE_VLVFB(reg_ndx * 2) + 1);
-               } else {
-                       bits = IXGBE_READ_REG(hw,
-                                             IXGBE_VLVFB(reg_ndx * 2) + 1);
-                       bits &= ~(1 << (VMDQ_P(0) - 32));
-                       bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
-               }
+       if (adapter->vfinfo[vf].spoofchk_enabled)
+               hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
 
-               /* If the filter was removed then ensure PF pool bit
-                * is cleared if the PF only added itself to the pool
-                * because the PF is in promiscuous mode.
-                */
-               if ((vlvf & VLAN_VID_MASK) == vid &&
-                   !test_bit(vid, adapter->active_vlans) && !bits)
-                       ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
-       }
+       if (add)
+               adapter->vfinfo[vf].vlan_count++;
+       else if (adapter->vfinfo[vf].vlan_count)
+               adapter->vfinfo[vf].vlan_count--;
 
-       return err;
+       return 0;
 }
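
For context, the rewritten handler assumes the usual ixgbe mailbox layout:
the add/remove flag sits in the MSGINFO field of word 0 and the VLAN ID in
word 1. A standalone decode using the same masks (constant values reproduced
here from the driver headers for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define IXGBE_VT_MSGINFO_SHIFT  16
    #define IXGBE_VT_MSGINFO_MASK   (0xFF << IXGBE_VT_MSGINFO_SHIFT)
    #define IXGBE_VLVF_VLANID_MASK  0x00000FFF

    int main(void)
    {
            uint32_t msgbuf[2] = { 1 << IXGBE_VT_MSGINFO_SHIFT, 100 };
            unsigned add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                           IXGBE_VT_MSGINFO_SHIFT;
            unsigned vid = msgbuf[1] & IXGBE_VLVF_VLANID_MASK;

            printf("add=%u vid=%u\n", add, vid);  /* add=1 vid=100 */
            return 0;
    }
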
 
 static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -1285,6 +1313,9 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
        if (err)
                goto out;
 
+       /* Revoke tagless access via VLAN 0 */
+       ixgbe_set_vf_vlan(adapter, false, 0, vf);
+
        ixgbe_set_vmvir(adapter, vlan, qos, vf);
        ixgbe_set_vmolr(hw, vf, false);
        if (adapter->vfinfo[vf].spoofchk_enabled)
@@ -1318,6 +1349,8 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
 
        err = ixgbe_set_vf_vlan(adapter, false,
                                adapter->vfinfo[vf].pf_vlan, vf);
+       /* Restore tagless access via VLAN 0 */
+       ixgbe_set_vf_vlan(adapter, true, 0, vf);
        ixgbe_clear_vmvir(adapter, vf);
        ixgbe_set_vmolr(hw, vf, true);
        hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
index 1329eddfc9ce7e7b1c8063549e4e9ce9b7039f0b..06add27c8b8c281a1a81ec80909fe8df0dc28452 100644 (file)
@@ -3300,7 +3300,7 @@ struct ixgbe_mac_operations {
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
-       s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+       s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool);
        s32 (*init_uta_tables)(struct ixgbe_hw *);
        void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
        void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
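
Judging by the call sites earlier in this series (all of which pass false),
the bool grown by set_vfta here is a bypass flag: when true, only the VFTA
bit is touched and the shared VLVF/VLVFB pool bookkeeping is skipped
(upstream names the parameter vlvf_bypass). A sketch of a typical call,
mirroring the ones above:

    /* add VLAN vid for the PF pool, updating VLVF as well (no bypass) */
    err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false);
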
index bf8225ceab8e4ff74ef102bf10379bc088807b2e..2358c1b7d5864d36335340658d94c7228574c141 100644 (file)
@@ -154,12 +154,16 @@ mac_reset_top:
 
        /* Add the SAN MAC address to the RAR only if it's a valid address */
        if (is_valid_ether_addr(hw->mac.san_addr)) {
-               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
-                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
-
                /* Save the SAN MAC RAR index */
                hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
 
+               hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* clear VMDq pool/queue selection for this RAR */
+               hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
+                                      IXGBE_CLEAR_VMDQ_ALL);
+
                /* Reserve the last RAR for the SAN MAC address */
                hw->mac.num_rar_entries--;
        }
index 528c2544389b90c4ccd630b163c9052b4807a944..15b1f6bbd92de4a76a50a164c9b93b888dba66e3 100644 (file)
 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 #define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK      0x000000ff
 #define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK      0x0000ff00
+#define      MVNETA_CPU_RXQ_ACCESS(rxq)                 BIT(rxq)
+#define      MVNETA_CPU_TXQ_ACCESS(txq)                 BIT((txq) + 8)
 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
 
-/* Exception Interrupt Port/Queue Cause register */
+/* Exception Interrupt Port/Queue Cause register
+ *
+ * Their behavior depends on the mapping done using the PCPX2Q
+ * registers. For a given CPU, if the bit associated with a queue is
+ * not set, then a read of the register from this CPU will always
+ * return 0 and a write won't do anything.
+ */
 
 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
 #define MVNETA_INTR_NEW_MASK                     0x25a4
 
 #define MVNETA_TX_MTU_MAX              0x3ffff
 
+/* The RSS lookup table actually has 256 entries but we do not use
+ * them yet
+ */
+#define MVNETA_RSS_LU_TABLE_SIZE       1
+
 /* TSO header size */
 #define TSO_HEADER_SIZE 128
 
@@ -356,6 +369,7 @@ struct mvneta_port {
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;
+       int rxq_def;
 
        /* Core clock */
        struct clk *clk;
@@ -374,6 +388,8 @@ struct mvneta_port {
        unsigned int use_inband_status:1;
 
        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+
+       u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 };
 
 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -499,6 +515,9 @@ struct mvneta_tx_queue {
 
        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;
+
+       /* Affinity mask for CPUs */
+       cpumask_t affinity_mask;
 };
 
 struct mvneta_rx_queue {
@@ -819,7 +838,13 @@ static void mvneta_port_up(struct mvneta_port *pp)
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
 
        /* Enable all initialized RXQs. */
-       mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
+       q_map = 0;
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+               if (rxq->descs != NULL)
+                       q_map |= (1 << queue);
+       }
+       mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
 }
 
 /* Stop the Ethernet port activity */
@@ -1025,6 +1050,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        int cpu;
        int queue;
        u32 val;
+       int max_cpu = num_present_cpus();
 
        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
@@ -1040,13 +1066,33 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
 
-       /* Set CPU queue access map - all CPUs have access to all RX
-        * queues and to all TX queues
+       /* Set CPU queue access map. CPUs are assigned to the RX and
+        * TX queues modulo their number. If there is only one TX
+        * queue then it is assigned to the CPU associated to the
+        * default RX queue.
         */
-       for_each_present_cpu(cpu)
-               mvreg_write(pp, MVNETA_CPU_MAP(cpu),
-                           (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
-                            MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+       for_each_present_cpu(cpu) {
+               int rxq_map = 0, txq_map = 0;
+               int rxq, txq;
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       if ((rxq % max_cpu) == cpu)
+                               rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+               for (txq = 0; txq < txq_number; txq++)
+                       if ((txq % max_cpu) == cpu)
+                               txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+               /* With only one TX queue we configure a special case
+                * which will allow to get all the irq on a single
+                * CPU
+                */
+               if (txq_number == 1)
+                       txq_map = (cpu == pp->rxq_def) ?
+                               MVNETA_CPU_TXQ_ACCESS(1) : 0;
+
+               mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+       }
 
        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
@@ -1067,7 +1113,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        mvreg_write(pp, MVNETA_ACC_MODE, val);
 
        /* Update val of portCfg register accordingly with all RxQueue types */
-       val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+       val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);
 
        val = 0;
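
The modulo distribution above is easiest to see with concrete numbers. A
standalone sketch with 2 CPUs and 8+8 queues, reusing the access macros from
this patch:

    #include <stdio.h>

    #define MVNETA_CPU_RXQ_ACCESS(rxq)  (1u << (rxq))
    #define MVNETA_CPU_TXQ_ACCESS(txq)  (1u << ((txq) + 8))

    int main(void)
    {
            int max_cpu = 2, rxq_number = 8, txq_number = 8, cpu, q;

            for (cpu = 0; cpu < max_cpu; cpu++) {
                    unsigned map = 0;

                    for (q = 0; q < rxq_number; q++)
                            if (q % max_cpu == cpu)
                                    map |= MVNETA_CPU_RXQ_ACCESS(q);
                    for (q = 0; q < txq_number; q++)
                            if (q % max_cpu == cpu)
                                    map |= MVNETA_CPU_TXQ_ACCESS(q);
                    printf("cpu%d map=0x%04x\n", cpu, map);
            }
            /* cpu0 map=0x5555 (even queues), cpu1 map=0xaaaa (odd) */
            return 0;
    }
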
@@ -2101,19 +2147,19 @@ static void mvneta_set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {
                /* Accept all: Multicast + Unicast */
                mvneta_rx_unicast_promisc_set(pp, 1);
-               mvneta_set_ucast_table(pp, rxq_def);
-               mvneta_set_special_mcast_table(pp, rxq_def);
-               mvneta_set_other_mcast_table(pp, rxq_def);
+               mvneta_set_ucast_table(pp, pp->rxq_def);
+               mvneta_set_special_mcast_table(pp, pp->rxq_def);
+               mvneta_set_other_mcast_table(pp, pp->rxq_def);
        } else {
                /* Accept single Unicast */
                mvneta_rx_unicast_promisc_set(pp, 0);
                mvneta_set_ucast_table(pp, -1);
-               mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+               mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
 
                if (dev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
-                       mvneta_set_special_mcast_table(pp, rxq_def);
-                       mvneta_set_other_mcast_table(pp, rxq_def);
+                       mvneta_set_special_mcast_table(pp, pp->rxq_def);
+                       mvneta_set_other_mcast_table(pp, pp->rxq_def);
                } else {
                        /* Accept only initialized multicast */
                        mvneta_set_special_mcast_table(pp, -1);
@@ -2122,7 +2168,7 @@ static void mvneta_set_rx_mode(struct net_device *dev)
                        if (!netdev_mc_empty(dev)) {
                                netdev_for_each_mc_addr(ha, dev) {
                                        mvneta_mcast_addr_set(pp, ha->addr,
-                                                             rxq_def);
+                                                             pp->rxq_def);
                                }
                        }
                }
@@ -2173,6 +2219,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 {
        int rx_done = 0;
        u32 cause_rx_tx;
+       int rx_queue;
        struct mvneta_port *pp = netdev_priv(napi->dev);
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
@@ -2204,8 +2251,15 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
+       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+
        cause_rx_tx |= port->cause_rx_tx;
-       rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+
+       if (rx_queue) {
+               rx_queue = rx_queue - 1;
+               rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
+       }
+
        budget -= rx_done;
 
        if (budget > 0) {
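
The selection above uses fls() to service the highest-numbered RX queue whose
cause bit is pending, on the assumption that the RX cause bits occupy bits
8-15 of the register (hence the >> 8 and the 0xff mask). A standalone
illustration with a local stand-in for the kernel's fls():

    #include <stdio.h>

    /* 1-based index of the most significant set bit, 0 if none
     * (same contract as the kernel's fls())
     */
    static int fls_demo(unsigned x)
    {
            int r = 0;

            while (x) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            unsigned cause_rx_tx = 0x00002400;  /* rxq 2 and rxq 5 pending */
            int rx_queue = fls_demo((cause_rx_tx >> 8) & 0xff);

            if (rx_queue)
                    printf("poll rxq %d\n", rx_queue - 1);  /* rxq 5 */
            return 0;
    }
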
@@ -2322,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 static int mvneta_txq_init(struct mvneta_port *pp,
                           struct mvneta_tx_queue *txq)
 {
+       int cpu;
+
        txq->size = pp->tx_ring_size;
 
        /* A queue must always have room for at least one skb.
@@ -2374,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
        }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
+       /* Setup XPS mapping */
+       if (txq_number > 1)
+               cpu = txq->id % num_present_cpus();
+       else
+               cpu = pp->rxq_def % num_present_cpus();
+       cpumask_set_cpu(cpu, &txq->affinity_mask);
+       netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
        return 0;
 }
 
@@ -2418,19 +2482,27 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-       mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++)
+               mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }
 
 
 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-       int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
-       if (err) {
-               netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-                          __func__, rxq_def);
-               mvneta_cleanup_rxqs(pp);
-               return err;
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++) {
+               int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+
+               if (err) {
+                       netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+                                  __func__, queue);
+                       mvneta_cleanup_rxqs(pp);
+                       return err;
+               }
        }
 
        return 0;
@@ -2454,6 +2526,31 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
        return 0;
 }
 
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       /* All the queues are unmasked, but actually only the ones
+        * mapped to this CPU will be unmasked
+        */
+       mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+                   MVNETA_RX_INTR_MASK_ALL |
+                   MVNETA_TX_INTR_MASK_ALL |
+                   MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       /* All the queues are masked, but actually only the ones
+        * mapped to this CPU will be masked
+        */
+       mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+       mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
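
These helpers must run on the CPU whose view of the mask registers is being
changed, since the PCPX2Q mapping banks those registers per CPU; the callers
below therefore dispatch them with smp_call_function_single() in a loop over
the online CPUs. Where including the current CPU is acceptable, the same
dispatch could be written with the on_each_cpu() helper, e.g. (an equivalent
sketch, not what the driver does):

    /* run the unmask callback on every online CPU and wait for completion */
    on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
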
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        unsigned int cpu;
@@ -2471,11 +2568,10 @@ static void mvneta_start_dev(struct mvneta_port *pp)
                napi_enable(&port->napi);
        }
 
-       /* Unmask interrupts */
-       mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                   MVNETA_RX_INTR_MASK(rxq_number) |
-                   MVNETA_TX_INTR_MASK(txq_number) |
-                   MVNETA_MISCINTR_INTR_MASK);
+       /* Unmask interrupts. It has to be done from each CPU */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+                                        pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE |
@@ -2634,7 +2730,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);
 
        /* Set new addr in hw */
-       mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+       mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
 
        eth_commit_mac_addr_change(dev, addr);
        return 0;
@@ -2751,22 +2847,45 @@ static void mvneta_percpu_disable(void *arg)
 
 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
-       int online_cpu_idx, cpu, i = 0;
+       int online_cpu_idx, max_cpu, cpu, i = 0;
 
-       online_cpu_idx = rxq_def % num_online_cpus();
+       online_cpu_idx = pp->rxq_def % num_online_cpus();
+       max_cpu = num_present_cpus();
 
        for_each_online_cpu(cpu) {
+               int rxq_map = 0, txq_map = 0;
+               int rxq;
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       if ((rxq % max_cpu) == cpu)
+                               rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
                if (i == online_cpu_idx)
-                       /* Enable per-CPU interrupt on the one CPU we
-                        * just elected
+                       /* Map the default receive queue to the
+                        * elected CPU
                         */
-                       smp_call_function_single(cpu, mvneta_percpu_enable,
-                                               pp, true);
+                       rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
+
+               /* We update the TX queue map only if we have one
+                * queue. In this case we associate the TX queue with
+                * the CPU bound to the default RX queue
+                */
+               if (txq_number == 1)
+                       txq_map = (i == online_cpu_idx) ?
+                               MVNETA_CPU_TXQ_ACCESS(1) : 0;
                else
-                       /* Disable per-CPU interrupt on all the other CPU */
-                       smp_call_function_single(cpu, mvneta_percpu_disable,
-                                               pp, true);
+                       txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+                               MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
+               mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+
+               /* Update the interrupt mask on each CPU according to
+                * the new mapping
+                */
+               smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+                                        pp, true);
                i++;
        }
 };
 
@@ -2801,12 +2920,22 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
                napi_enable(&port->napi);
 
+               /* Enable per-CPU interrupts on the CPU that is
+                * brought up.
+                */
+               smp_call_function_single(cpu, mvneta_percpu_enable,
+                                        pp, true);
+
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
                 */
                mvneta_percpu_elect(pp);
 
-               /* Unmask all ethernet port interrupts */
+               /* Unmask all ethernet port interrupts, as this
+                * notifier is called for each CPU then the CPU to
+                * Queue mapping is applied
+                */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                        MVNETA_RX_INTR_MASK(rxq_number) |
                        MVNETA_TX_INTR_MASK(txq_number) |
@@ -2857,7 +2986,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
 static int mvneta_open(struct net_device *dev)
 {
        struct mvneta_port *pp = netdev_priv(dev);
-       int ret;
+       int ret, cpu;
 
        pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
        pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -2887,8 +3016,13 @@ static int mvneta_open(struct net_device *dev)
         */
        mvneta_percpu_disable(pp);
 
-       /* Elect a CPU to handle our RX queue interrupt */
-       mvneta_percpu_elect(pp);
+       /* Enable per-CPU interrupt on all the CPUs to handle our RX
+        * queue interrupts
+        */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_enable,
+                                        pp, true);
 
        /* Register a CPU notifier to handle the case where our CPU
         * might be taken offline.
@@ -3150,6 +3284,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
        return -EOPNOTSUPP;
 }
 
+static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+       return MVNETA_RSS_LU_TABLE_SIZE;
+}
+
+static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
+                                   struct ethtool_rxnfc *info,
+                                   u32 *rules __always_unused)
+{
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = rxq_number;
+               return 0;
+       case ETHTOOL_GRXFH:
+               return -EOPNOTSUPP;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int mvneta_config_rss(struct mvneta_port *pp)
+{
+       int cpu;
+       u32 val;
+
+       netif_tx_stop_all_queues(pp->dev);
+
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
+                                        pp, true);
+
+       /* We have to synchronise on the napi of each CPU */
+       for_each_online_cpu(cpu) {
+               struct mvneta_pcpu_port *pcpu_port =
+                       per_cpu_ptr(pp->ports, cpu);
+
+               napi_synchronize(&pcpu_port->napi);
+               napi_disable(&pcpu_port->napi);
+       }
+
+       pp->rxq_def = pp->indir[0];
+
+       /* Update unicast mapping */
+       mvneta_set_rx_mode(pp->dev);
+
+       /* Update val of portCfg register accordingly with all RxQueue types */
+       val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+       mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+       /* Update the elected CPU matching the new rxq_def */
+       mvneta_percpu_elect(pp);
+
+       /* We have to synchronise on the napi of each CPU */
+       for_each_online_cpu(cpu) {
+               struct mvneta_pcpu_port *pcpu_port =
+                       per_cpu_ptr(pp->ports, cpu);
+
+               napi_enable(&pcpu_port->napi);
+       }
+
+       netif_tx_start_all_queues(pp->dev);
+
+       return 0;
+}
+
+static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+                                  const u8 *key, const u8 hfunc)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       /* We require at least one supported parameter to be changed
+        * and no change in any of the unsupported parameters
+        */
+       if (key ||
+           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+               return -EOPNOTSUPP;
+
+       if (!indir)
+               return 0;
+
+       memcpy(pp->indir, indir, sizeof(pp->indir));
+
+       return mvneta_config_rss(pp);
+}
+
+static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                                  u8 *hfunc)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (!indir)
+               return 0;
+
+       memcpy(indir, pp->indir, sizeof(pp->indir));
+
+       return 0;
+}
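
As a usage note: with a one-entry indirection table, the whole RSS
configuration collapses into choosing the default RX queue. Reading it back
with "ethtool -x <dev>" shows the single entry, and something along the lines
of "ethtool -X <dev> weight 0 0 1" (all hash weight on queue 2) would steer
every flow to that queue and trigger the CPU re-election performed in
mvneta_config_rss() above.
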
+
 static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
@@ -3174,6 +3408,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_strings    = mvneta_ethtool_get_strings,
        .get_ethtool_stats = mvneta_ethtool_get_stats,
        .get_sset_count = mvneta_ethtool_get_sset_count,
+       .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
+       .get_rxnfc      = mvneta_ethtool_get_rxnfc,
+       .get_rxfh       = mvneta_ethtool_get_rxfh,
+       .set_rxfh       = mvneta_ethtool_set_rxfh,
 };
 
 /* Initialize hw */
@@ -3363,6 +3601,10 @@ static int mvneta_probe(struct platform_device *pdev)
                                 strcmp(managed, "in-band-status") == 0);
        pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
+       pp->rxq_def = rxq_def;
+
+       pp->indir[0] = rxq_def;
+
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
index a0755919ccaf0fe801e4a603f286aa1028f417ff..fe11e967095fd61df5d37c1fb07be881fbe769fe 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_MLX5_CORE)         += mlx5_core.o
 
 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
                health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o   \
-               mad.o transobj.o vport.o sriov.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o eswitch.o \
-               en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+               mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
+               en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
                en_txrx.o
index 89313d46952ded476070808d9cbffe5e0f07b28d..f689ce580b4467a4233efff4de41ba221dcacc6f 100644 (file)
@@ -64,6 +64,8 @@
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
 #define MLX5E_SQ_BF_BUDGET             16
 
+#define MLX5E_NUM_MAIN_GROUPS 9
+
 static const char vport_strings[][ETH_GSTRING_LEN] = {
        /* vport statistics */
        "rx_packets",
@@ -442,7 +444,7 @@ enum mlx5e_rqt_ix {
 struct mlx5e_eth_addr_info {
        u8  addr[ETH_ALEN + 2];
        u32 tt_vec;
-       u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+       struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
 };
 
 #define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
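
The switch from ft_ix indices to mlx5_flow_rule pointers means teardown no
longer needs to know which table an entry landed in; each rule is deleted
through its handle. A hedged sketch of the cleanup this enables, assuming the
handle-based delete of the new fs_core API (mlx5_del_flow_rule()):

    /* sketch: handle-based cleanup of the per-traffic-type rules,
     * assuming mlx5_del_flow_rule() from the fs_core API added in
     * this series
     */
    static void sketch_del_eth_addr_rules(struct mlx5e_eth_addr_info *ai)
    {
            int tt;

            for (tt = 0; tt < MLX5E_NUM_TT; tt++)
                    if (ai->tt_vec & BIT(tt))
                            mlx5_del_flow_rule(ai->ft_rule[tt]);
    }
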
@@ -466,15 +468,22 @@ enum {
 
 struct mlx5e_vlan_db {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-       u32           active_vlans_ft_ix[VLAN_N_VID];
-       u32           untagged_rule_ft_ix;
-       u32           any_vlan_rule_ft_ix;
+       struct mlx5_flow_rule   *active_vlans_rule[VLAN_N_VID];
+       struct mlx5_flow_rule   *untagged_rule;
+       struct mlx5_flow_rule   *any_vlan_rule;
        bool          filter_disabled;
 };
 
 struct mlx5e_flow_table {
-       void *vlan;
-       void *main;
+       int num_groups;
+       struct mlx5_flow_table          *t;
+       struct mlx5_flow_group          **g;
+};
+
+struct mlx5e_flow_tables {
+       struct mlx5_flow_namespace      *ns;
+       struct mlx5e_flow_table         vlan;
+       struct mlx5e_flow_table         main;
 };
 
 struct mlx5e_priv {
@@ -497,7 +506,7 @@ struct mlx5e_priv {
        u32                        rqtn[MLX5E_NUM_RQT];
        u32                        tirn[MLX5E_NUM_TT];
 
-       struct mlx5e_flow_table    ft;
+       struct mlx5e_flow_tables   fts;
        struct mlx5e_eth_addr_db   eth_addr;
        struct mlx5e_vlan_db       vlan;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
deleted file mode 100644 (file)
index 5b93c9c..0000000
+++ /dev/null
@@ -1,1046 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/list.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/mlx5/flow_table.h>
-#include "en.h"
-
-enum {
-       MLX5E_FULLMATCH = 0,
-       MLX5E_ALLMULTI  = 1,
-       MLX5E_PROMISC   = 2,
-};
-
-enum {
-       MLX5E_UC        = 0,
-       MLX5E_MC_IPV4   = 1,
-       MLX5E_MC_IPV6   = 2,
-       MLX5E_MC_OTHER  = 3,
-};
-
-enum {
-       MLX5E_ACTION_NONE = 0,
-       MLX5E_ACTION_ADD  = 1,
-       MLX5E_ACTION_DEL  = 2,
-};
-
-struct mlx5e_eth_addr_hash_node {
-       struct hlist_node          hlist;
-       u8                         action;
-       struct mlx5e_eth_addr_info ai;
-};
-
-static inline int mlx5e_hash_eth_addr(u8 *addr)
-{
-       return addr[5];
-}
-
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
-{
-       struct mlx5e_eth_addr_hash_node *hn;
-       int ix = mlx5e_hash_eth_addr(addr);
-       int found = 0;
-
-       hlist_for_each_entry(hn, &hash[ix], hlist)
-               if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
-                       found = 1;
-                       break;
-               }
-
-       if (found) {
-               hn->action = MLX5E_ACTION_NONE;
-               return;
-       }
-
-       hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
-       if (!hn)
-               return;
-
-       ether_addr_copy(hn->ai.addr, addr);
-       hn->action = MLX5E_ACTION_ADD;
-
-       hlist_add_head(&hn->hlist, &hash[ix]);
-}
-
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
-{
-       hlist_del(&hn->hlist);
-       kfree(hn);
-}
-
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
-                                              struct mlx5e_eth_addr_info *ai)
-{
-       void *ft = priv->ft.main;
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
-               mlx5_del_flow_table_entry(ft,
-                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
-               mlx5_del_flow_table_entry(ft,
-                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
-               mlx5_del_flow_table_entry(ft,
-                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
-               mlx5_del_flow_table_entry(ft,
-                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
-
-       if (ai->tt_vec & BIT(MLX5E_TT_ANY))
-               mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
-       if (is_unicast_ether_addr(addr))
-               return MLX5E_UC;
-
-       if ((addr[0] == 0x01) &&
-           (addr[1] == 0x00) &&
-           (addr[2] == 0x5e) &&
-          !(addr[3] &  0x80))
-               return MLX5E_MC_IPV4;
-
-       if ((addr[0] == 0x33) &&
-           (addr[1] == 0x33))
-               return MLX5E_MC_IPV6;
-
-       return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
-       int eth_addr_type;
-       u32 ret;
-
-       switch (type) {
-       case MLX5E_FULLMATCH:
-               eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
-               switch (eth_addr_type) {
-               case MLX5E_UC:
-                       ret =
-                               BIT(MLX5E_TT_IPV4_TCP)       |
-                               BIT(MLX5E_TT_IPV6_TCP)       |
-                               BIT(MLX5E_TT_IPV4_UDP)       |
-                               BIT(MLX5E_TT_IPV6_UDP)       |
-                               BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-                               BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-                               BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-                               BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-                               BIT(MLX5E_TT_IPV4)           |
-                               BIT(MLX5E_TT_IPV6)           |
-                               BIT(MLX5E_TT_ANY)            |
-                               0;
-                       break;
-
-               case MLX5E_MC_IPV4:
-                       ret =
-                               BIT(MLX5E_TT_IPV4_UDP)       |
-                               BIT(MLX5E_TT_IPV4)           |
-                               0;
-                       break;
-
-               case MLX5E_MC_IPV6:
-                       ret =
-                               BIT(MLX5E_TT_IPV6_UDP)       |
-                               BIT(MLX5E_TT_IPV6)           |
-                               0;
-                       break;
-
-               case MLX5E_MC_OTHER:
-                       ret =
-                               BIT(MLX5E_TT_ANY)            |
-                               0;
-                       break;
-               }
-
-               break;
-
-       case MLX5E_ALLMULTI:
-               ret =
-                       BIT(MLX5E_TT_IPV4_UDP) |
-                       BIT(MLX5E_TT_IPV6_UDP) |
-                       BIT(MLX5E_TT_IPV4)     |
-                       BIT(MLX5E_TT_IPV6)     |
-                       BIT(MLX5E_TT_ANY)      |
-                       0;
-               break;
-
-       default: /* MLX5E_PROMISC */
-               ret =
-                       BIT(MLX5E_TT_IPV4_TCP)       |
-                       BIT(MLX5E_TT_IPV6_TCP)       |
-                       BIT(MLX5E_TT_IPV4_UDP)       |
-                       BIT(MLX5E_TT_IPV6_UDP)       |
-                       BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
-                       BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
-                       BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
-                       BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
-                       BIT(MLX5E_TT_IPV4)           |
-                       BIT(MLX5E_TT_IPV6)           |
-                       BIT(MLX5E_TT_ANY)            |
-                       0;
-               break;
-       }
-
-       return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-                                    struct mlx5e_eth_addr_info *ai, int type,
-                                    void *flow_context, void *match_criteria)
-{
-       u8 match_criteria_enable = 0;
-       void *match_value;
-       void *dest;
-       u8   *dmac;
-       u8   *match_criteria_dmac;
-       void *ft   = priv->ft.main;
-       u32  *tirn = priv->tirn;
-       u32  *ft_ix;
-       u32  tt_vec;
-       int  err;
-
-       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-       dmac = MLX5_ADDR_OF(fte_match_param, match_value,
-                           outer_headers.dmac_47_16);
-       match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
-                                          outer_headers.dmac_47_16);
-       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
-       MLX5_SET(flow_context, flow_context, action,
-                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
-       MLX5_SET(dest_format_struct, dest, destination_type,
-                MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
-
-       switch (type) {
-       case MLX5E_FULLMATCH:
-               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-               memset(match_criteria_dmac, 0xff, ETH_ALEN);
-               ether_addr_copy(dmac, ai->addr);
-               break;
-
-       case MLX5E_ALLMULTI:
-               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-               match_criteria_dmac[0] = 0x01;
-               dmac[0] = 0x01;
-               break;
-
-       case MLX5E_PROMISC:
-               break;
-       }
-
-       tt_vec = mlx5e_get_tt_vec(ai, type);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
-       if (tt_vec & BIT(MLX5E_TT_ANY)) {
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_ANY]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_ANY);
-       }
-
-       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-                        outer_headers.ethertype);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
-       if (tt_vec & BIT(MLX5E_TT_IPV4)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IP);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV4]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4);
-       }
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
-       if (tt_vec & BIT(MLX5E_TT_IPV6)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV6]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6);
-       }
-
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-                        outer_headers.ip_protocol);
-       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-                IPPROTO_UDP);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
-       if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IP);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV4_UDP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
-       }
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
-       if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV6_UDP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
-       }
-
-       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-                IPPROTO_TCP);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
-       if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IP);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV4_TCP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
-       }
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
-       if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV6_TCP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
-       }
-
-       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-                IPPROTO_AH);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
-       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IP);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV4_IPSEC_AH]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
-       }
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
-       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV6_IPSEC_AH]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
-       }
-
-       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
-                IPPROTO_ESP);
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
-       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IP);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
-       }
-
-       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
-       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
-               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
-                        ETH_P_IPV6);
-               MLX5_SET(dest_format_struct, dest, destination_id,
-                        tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
-               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
-                                               match_criteria, flow_context,
-                                               ft_ix);
-               if (err)
-                       goto err_del_ai;
-
-               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
-       }
-
-       return 0;
-
-err_del_ai:
-       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
-       return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
-                                  struct mlx5e_eth_addr_info *ai, int type)
-{
-       u32 *flow_context;
-       u32 *match_criteria;
-       int err;
-
-       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-                                     MLX5_ST_SZ_BYTES(dest_format_struct));
-       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-       if (!flow_context || !match_criteria) {
-               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-               err = -ENOMEM;
-               goto add_eth_addr_rule_out;
-       }
-
-       err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
-                                       match_criteria);
-       if (err)
-               netdev_err(priv->netdev, "%s: failed\n", __func__);
-
-add_eth_addr_rule_out:
-       kvfree(match_criteria);
-       kvfree(flow_context);
-       return err;
-}
-
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
-{
-       struct net_device *ndev = priv->netdev;
-       int max_list_size;
-       int list_size;
-       u16 *vlans;
-       int vlan;
-       int err;
-       int i;
-
-       list_size = 0;
-       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
-               list_size++;
-
-       max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
-
-       if (list_size > max_list_size) {
-               netdev_warn(ndev,
-                           "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
-                           list_size, max_list_size);
-               list_size = max_list_size;
-       }
-
-       vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
-       if (!vlans)
-               return -ENOMEM;
-
-       i = 0;
-       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
-               if (i >= list_size)
-                       break;
-               vlans[i++] = vlan;
-       }
-
-       err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
-       if (err)
-               netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
-                          err);
-
-       kfree(vlans);
-       return err;
-}
-
-enum mlx5e_vlan_rule_type {
-       MLX5E_VLAN_RULE_TYPE_UNTAGGED,
-       MLX5E_VLAN_RULE_TYPE_ANY_VID,
-       MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-};
-
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
-                              enum mlx5e_vlan_rule_type rule_type, u16 vid)
-{
-       u8 match_criteria_enable = 0;
-       u32 *flow_context;
-       void *match_value;
-       void *dest;
-       u32 *match_criteria;
-       u32 *ft_ix;
-       int err;
-
-       flow_context   = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-                                     MLX5_ST_SZ_BYTES(dest_format_struct));
-       match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-       if (!flow_context || !match_criteria) {
-               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-               err = -ENOMEM;
-               goto add_vlan_rule_out;
-       }
-       match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-       dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
-
-       MLX5_SET(flow_context, flow_context, action,
-                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-       MLX5_SET(flow_context, flow_context, destination_list_size, 1);
-       MLX5_SET(dest_format_struct, dest, destination_type,
-                MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
-       MLX5_SET(dest_format_struct, dest, destination_id,
-                mlx5_get_flow_table_id(priv->ft.main));
-
-       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-                        outer_headers.vlan_tag);
-
-       switch (rule_type) {
-       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-               ft_ix = &priv->vlan.untagged_rule_ft_ix;
-               break;
-       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
-               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
-                        1);
-               break;
-       default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-               err = mlx5e_vport_context_update_vlans(priv);
-               if (err)
-                       goto add_vlan_rule_out;
-
-               ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
-               MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
-                        1);
-               MLX5_SET_TO_ONES(fte_match_param, match_criteria,
-                                outer_headers.first_vid);
-               MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
-                        vid);
-               break;
-       }
-
-       err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
-                                       match_criteria, flow_context, ft_ix);
-       if (err)
-               netdev_err(priv->netdev, "%s: failed\n", __func__);
-
-add_vlan_rule_out:
-       kvfree(match_criteria);
-       kvfree(flow_context);
-       return err;
-}
-
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
-                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
-{
-       switch (rule_type) {
-       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-               mlx5_del_flow_table_entry(priv->ft.vlan,
-                                         priv->vlan.untagged_rule_ft_ix);
-               break;
-       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               mlx5_del_flow_table_entry(priv->ft.vlan,
-                                         priv->vlan.any_vlan_rule_ft_ix);
-               break;
-       case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
-               mlx5_del_flow_table_entry(priv->ft.vlan,
-                                         priv->vlan.active_vlans_ft_ix[vid]);
-               mlx5e_vport_context_update_vlans(priv);
-               break;
-       }
-}
-
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
-{
-       if (!priv->vlan.filter_disabled)
-               return;
-
-       priv->vlan.filter_disabled = false;
-       if (priv->netdev->flags & IFF_PROMISC)
-               return;
-       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
-{
-       if (priv->vlan.filter_disabled)
-               return;
-
-       priv->vlan.filter_disabled = true;
-       if (priv->netdev->flags & IFF_PROMISC)
-               return;
-       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
-                         u16 vid)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-
-       set_bit(vid, priv->vlan.active_vlans);
-
-       return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
-                          u16 vid)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-
-       clear_bit(vid, priv->vlan.active_vlans);
-
-       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
-       return 0;
-}
-
-#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
-       for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
-               hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
-                                struct mlx5e_eth_addr_hash_node *hn)
-{
-       switch (hn->action) {
-       case MLX5E_ACTION_ADD:
-               mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
-               hn->action = MLX5E_ACTION_NONE;
-               break;
-
-       case MLX5E_ACTION_DEL:
-               mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
-               mlx5e_del_eth_addr_from_hash(hn);
-               break;
-       }
-}
-
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
-{
-       struct net_device *netdev = priv->netdev;
-       struct netdev_hw_addr *ha;
-
-       netif_addr_lock_bh(netdev);
-
-       mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
-                                  priv->netdev->dev_addr);
-
-       netdev_for_each_uc_addr(ha, netdev)
-               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
-
-       netdev_for_each_mc_addr(ha, netdev)
-               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
-
-       netif_addr_unlock_bh(netdev);
-}
-
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
-                                 u8 addr_array[][ETH_ALEN], int size)
-{
-       bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-       struct net_device *ndev = priv->netdev;
-       struct mlx5e_eth_addr_hash_node *hn;
-       struct hlist_head *addr_list;
-       struct hlist_node *tmp;
-       int i = 0;
-       int hi;
-
-       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
-
-       if (is_uc) /* Make sure our own address is pushed first */
-               ether_addr_copy(addr_array[i++], ndev->dev_addr);
-       else if (priv->eth_addr.broadcast_enabled)
-               ether_addr_copy(addr_array[i++], ndev->broadcast);
-
-       mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
-               if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
-                       continue;
-               if (i >= size)
-                       break;
-               ether_addr_copy(addr_array[i++], hn->ai.addr);
-       }
-}
-
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
-                                                int list_type)
-{
-       bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-       struct mlx5e_eth_addr_hash_node *hn;
-       u8 (*addr_array)[ETH_ALEN] = NULL;
-       struct hlist_head *addr_list;
-       struct hlist_node *tmp;
-       int max_size;
-       int size;
-       int err;
-       int hi;
-
-       size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
-       max_size = is_uc ?
-               1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
-               1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
-
-       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
-       mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
-               size++;
-
-       if (size > max_size) {
-               netdev_warn(priv->netdev,
-                           "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
-                           is_uc ? "UC" : "MC", size, max_size);
-               size = max_size;
-       }
-
-       if (size) {
-               addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
-               if (!addr_array) {
-                       err = -ENOMEM;
-                       goto out;
-               }
-               mlx5e_fill_addr_array(priv, list_type, addr_array, size);
-       }
-
-       err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
-out:
-       if (err)
-               netdev_err(priv->netdev,
-                          "Failed to modify vport %s list err(%d)\n",
-                          is_uc ? "UC" : "MC", err);
-       kfree(addr_array);
-}
-
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
-{
-       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
-
-       mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
-       mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
-       mlx5_modify_nic_vport_promisc(priv->mdev, 0,
-                                     ea->allmulti_enabled,
-                                     ea->promisc_enabled);
-}
-
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
-{
-       struct mlx5e_eth_addr_hash_node *hn;
-       struct hlist_node *tmp;
-       int i;
-
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
-               mlx5e_execute_action(priv, hn);
-
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
-               mlx5e_execute_action(priv, hn);
-}
-
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
-{
-       struct mlx5e_eth_addr_hash_node *hn;
-       struct hlist_node *tmp;
-       int i;
-
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
-               hn->action = MLX5E_ACTION_DEL;
-       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
-               hn->action = MLX5E_ACTION_DEL;
-
-       if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
-               mlx5e_sync_netdev_addr(priv);
-
-       mlx5e_apply_netdev_addr(priv);
-}
-
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
-       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-                                              set_rx_mode_work);
-
-       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
-       struct net_device *ndev = priv->netdev;
-
-       bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
-       bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
-       bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
-       bool broadcast_enabled = rx_mode_enable;
-
-       bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
-       bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
-       bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
-       bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
-       bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
-       bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
-
-       if (enable_promisc) {
-               mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
-               if (!priv->vlan.filter_disabled)
-                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
-       }
-       if (enable_allmulti)
-               mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
-       if (enable_broadcast)
-               mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
-
-       mlx5e_handle_netdev_addr(priv);
-
-       if (disable_broadcast)
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
-       if (disable_allmulti)
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
-       if (disable_promisc) {
-               if (!priv->vlan.filter_disabled)
-                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
-               mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
-       }
-
-       ea->promisc_enabled   = promisc_enabled;
-       ea->allmulti_enabled  = allmulti_enabled;
-       ea->broadcast_enabled = broadcast_enabled;
-
-       mlx5e_vport_context_update(priv);
-}
-
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
-{
-       ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
-}
-
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
-{
-       struct mlx5_flow_table_group *g;
-       u8 *dmac;
-
-       g = kcalloc(9, sizeof(*g), GFP_KERNEL);
-       if (!g)
-               return -ENOMEM;
-
-       g[0].log_sz = 3;
-       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-                        outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-                        outer_headers.ip_protocol);
-
-       g[1].log_sz = 1;
-       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
-                        outer_headers.ethertype);
-
-       g[2].log_sz = 0;
-
-       g[3].log_sz = 14;
-       g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
-                           outer_headers.dmac_47_16);
-       memset(dmac, 0xff, ETH_ALEN);
-       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
-                        outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
-                        outer_headers.ip_protocol);
-
-       g[4].log_sz = 13;
-       g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
-                           outer_headers.dmac_47_16);
-       memset(dmac, 0xff, ETH_ALEN);
-       MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
-                        outer_headers.ethertype);
-
-       g[5].log_sz = 11;
-       g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
-                           outer_headers.dmac_47_16);
-       memset(dmac, 0xff, ETH_ALEN);
-
-       g[6].log_sz = 2;
-       g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
-                           outer_headers.dmac_47_16);
-       dmac[0] = 0x01;
-       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
-                        outer_headers.ethertype);
-       MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
-                        outer_headers.ip_protocol);
-
-       g[7].log_sz = 1;
-       g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
-                           outer_headers.dmac_47_16);
-       dmac[0] = 0x01;
-       MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
-                        outer_headers.ethertype);
-
-       g[8].log_sz = 0;
-       g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
-                           outer_headers.dmac_47_16);
-       dmac[0] = 0x01;
-       priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
-                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
-                                              9, g);
-       kfree(g);
-
-       return priv->ft.main ? 0 : -ENOMEM;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
-       mlx5_destroy_flow_table(priv->ft.main);
-}
-
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
-{
-       struct mlx5_flow_table_group *g;
-
-       g = kcalloc(2, sizeof(*g), GFP_KERNEL);
-       if (!g)
-               return -ENOMEM;
-
-       g[0].log_sz = 12;
-       g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-                        outer_headers.vlan_tag);
-       MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
-                        outer_headers.first_vid);
-
-       /* untagged + any vlan id */
-       g[1].log_sz = 1;
-       g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
-                        outer_headers.vlan_tag);
-
-       priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
-                                              MLX5_FLOW_TABLE_TYPE_NIC_RCV,
-                                              2, g);
-
-       kfree(g);
-       return priv->ft.vlan ? 0 : -ENOMEM;
-}
-
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
-{
-       mlx5_destroy_flow_table(priv->ft.vlan);
-}
-
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
-{
-       int err;
-
-       err = mlx5e_create_main_flow_table(priv);
-       if (err)
-               return err;
-
-       err = mlx5e_create_vlan_flow_table(priv);
-       if (err)
-               goto err_destroy_main_flow_table;
-
-       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-       if (err)
-               goto err_destroy_vlan_flow_table;
-
-       return 0;
-
-err_destroy_vlan_flow_table:
-       mlx5e_destroy_vlan_flow_table(priv);
-
-err_destroy_main_flow_table:
-       mlx5e_destroy_main_flow_table(priv);
-
-       return err;
-}
-
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
-{
-       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-       mlx5e_destroy_vlan_flow_table(priv);
-       mlx5e_destroy_main_flow_table(priv);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
new file mode 100644
index 0000000..80d81ab
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -0,0 +1,1224 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+enum {
+       MLX5E_FULLMATCH = 0,
+       MLX5E_ALLMULTI  = 1,
+       MLX5E_PROMISC   = 2,
+};
+
+enum {
+       MLX5E_UC        = 0,
+       MLX5E_MC_IPV4   = 1,
+       MLX5E_MC_IPV6   = 2,
+       MLX5E_MC_OTHER  = 3,
+};
+
+enum {
+       MLX5E_ACTION_NONE = 0,
+       MLX5E_ACTION_ADD  = 1,
+       MLX5E_ACTION_DEL  = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+       struct hlist_node          hlist;
+       u8                         action;
+       struct mlx5e_eth_addr_info ai;
+};
+
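+/* Hash on the last byte of the MAC address; collisions are resolved by
+ * chaining on the per-bucket hlists.
+ */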
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+       return addr[5];
+}
+
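+/* Add @addr to the hash, or mark an already-present entry as unchanged.
+ * Called under netif_addr_lock_bh(), hence the GFP_ATOMIC allocation.
+ */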
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       int ix = mlx5e_hash_eth_addr(addr);
+       int found = 0;
+
+       hlist_for_each_entry(hn, &hash[ix], hlist)
+               if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+                       found = 1;
+                       break;
+               }
+
+       if (found) {
+               hn->action = MLX5E_ACTION_NONE;
+               return;
+       }
+
+       hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+       if (!hn)
+               return;
+
+       ether_addr_copy(hn->ai.addr, addr);
+       hn->action = MLX5E_ACTION_ADD;
+
+       hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+       hlist_del(&hn->hlist);
+       kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+                                              struct mlx5e_eth_addr_info *ai)
+{
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_ANY))
+               mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
+}
+
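+/* Classify a destination MAC: unicast, IPv4 multicast (01:00:5e with the
+ * high bit of the fourth octet clear), IPv6 multicast (33:33), or other
+ * multicast/broadcast.
+ */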
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+       if (is_unicast_ether_addr(addr))
+               return MLX5E_UC;
+
+       if ((addr[0] == 0x01) &&
+           (addr[1] == 0x00) &&
+           (addr[2] == 0x5e) &&
+          !(addr[3] &  0x80))
+               return MLX5E_MC_IPV4;
+
+       if ((addr[0] == 0x33) &&
+           (addr[1] == 0x33))
+               return MLX5E_MC_IPV6;
+
+       return MLX5E_MC_OTHER;
+}
+
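+/* Return the set of traffic types (MLX5E_TT_* bits) that need a rule for
+ * this address: unicast and promisc get the full set, IPv4/IPv6 multicast
+ * only their own L3 and UDP types, allmulti a fixed subset.
+ */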
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+       int eth_addr_type;
+       u32 ret;
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+               switch (eth_addr_type) {
+               case MLX5E_UC:
+                       ret =
+                               BIT(MLX5E_TT_IPV4_TCP)       |
+                               BIT(MLX5E_TT_IPV6_TCP)       |
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV4)           |
+                               BIT(MLX5E_TT_IPV6)           |
+                               BIT(MLX5E_TT_ANY)            |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV4:
+                       ret =
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV4)           |
+                               0;
+                       break;
+
+               case MLX5E_MC_IPV6:
+                       ret =
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV6)           |
+                               0;
+                       break;
+
+               case MLX5E_MC_OTHER:
+                       ret =
+                               BIT(MLX5E_TT_ANY)            |
+                               0;
+                       break;
+               }
+
+               break;
+
+       case MLX5E_ALLMULTI:
+               ret =
+                       BIT(MLX5E_TT_IPV4_UDP) |
+                       BIT(MLX5E_TT_IPV6_UDP) |
+                       BIT(MLX5E_TT_IPV4)     |
+                       BIT(MLX5E_TT_IPV6)     |
+                       BIT(MLX5E_TT_ANY)      |
+                       0;
+               break;
+
+       default: /* MLX5E_PROMISC */
+               ret =
+                       BIT(MLX5E_TT_IPV4_TCP)       |
+                       BIT(MLX5E_TT_IPV6_TCP)       |
+                       BIT(MLX5E_TT_IPV4_UDP)       |
+                       BIT(MLX5E_TT_IPV6_UDP)       |
+                       BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV4)           |
+                       BIT(MLX5E_TT_IPV6)           |
+                       BIT(MLX5E_TT_ANY)            |
+                       0;
+               break;
+       }
+
+       return ret;
+}
+
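+/* Install one flow rule per traffic type in the main table. @mc and @mv
+ * are the match criteria and match value of an fte_match_param; each rule
+ * forwards matching packets to the TIR of its traffic type.
+ */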
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                    struct mlx5e_eth_addr_info *ai,
+                                    int type, u32 *mc, u32 *mv)
+{
+       struct mlx5_flow_destination dest;
+       u8 match_criteria_enable = 0;
+       struct mlx5_flow_rule **rule_p;
+       struct mlx5_flow_table *ft = priv->fts.main.t;
+       u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+                                  outer_headers.dmac_47_16);
+       u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
+                                  outer_headers.dmac_47_16);
+       u32 *tirn = priv->tirn;
+       u32 tt_vec;
+       int err = 0;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+
+       switch (type) {
+       case MLX5E_FULLMATCH:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               eth_broadcast_addr(mc_dmac);
+               ether_addr_copy(mv_dmac, ai->addr);
+               break;
+
+       case MLX5E_ALLMULTI:
+               match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+               mc_dmac[0] = 0x01;
+               mv_dmac[0] = 0x01;
+               break;
+
+       case MLX5E_PROMISC:
+               break;
+       }
+
+       tt_vec = mlx5e_get_tt_vec(ai, type);
+
+       if (tt_vec & BIT(MLX5E_TT_ANY)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_ANY];
+               dest.tir_num = tirn[MLX5E_TT_ANY];
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_ANY);
+       }
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+
+       if (tt_vec & BIT(MLX5E_TT_IPV4)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
+               dest.tir_num = tirn[MLX5E_TT_IPV4];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IP);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4);
+       }
+
+       if (tt_vec & BIT(MLX5E_TT_IPV6)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
+               dest.tir_num = tirn[MLX5E_TT_IPV6];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6);
+       }
+
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
+
+       if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
+               dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IP);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
+       }
+
+       if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
+               dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
+       }
+
+       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
+
+       if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
+               dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IP);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
+       }
+
+       if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
+               dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+       }
+
+       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
+
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
+               dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IP);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+       }
+
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
+               dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+       }
+
+       MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
+
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
+               dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IP);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+       }
+
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+               rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
+               dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
+               MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                            MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                            MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+               if (IS_ERR_OR_NULL(*rule_p))
+                       goto err_del_ai;
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
+       }
+
+       return 0;
+
+err_del_ai:
+       err = PTR_ERR(*rule_p);
+       *rule_p = NULL;
+       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+       return err;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_eth_addr_info *ai, int type)
+{
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+
+       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_eth_addr_rule_out;
+       }
+
+       err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
+                                       match_value);
+
+add_eth_addr_rule_out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+
+       return err;
+}
+
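+/* Push the active VLAN list to the NIC vport context, truncated to the
+ * device's log_max_vlan_list capability.
+ */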
+static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+{
+       struct net_device *ndev = priv->netdev;
+       int max_list_size;
+       int list_size;
+       u16 *vlans;
+       int vlan;
+       int err;
+       int i;
+
+       list_size = 0;
+       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+               list_size++;
+
+       max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+
+       if (list_size > max_list_size) {
+               netdev_warn(ndev,
+                           "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+                           list_size, max_list_size);
+               list_size = max_list_size;
+       }
+
+       vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
+       if (!vlans)
+               return -ENOMEM;
+
+       i = 0;
+       for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+               if (i >= list_size)
+                       break;
+               vlans[i++] = vlan;
+       }
+
+       err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+       if (err)
+               netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
+                          err);
+
+       kfree(vlans);
+       return err;
+}
+
+enum mlx5e_vlan_rule_type {
+       MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+       MLX5E_VLAN_RULE_TYPE_ANY_VID,
+       MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
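+/* VLAN rules match on the outer vlan_tag bit (plus first_vid for
+ * MATCH_VID) and forward accepted packets to the main flow table.
+ */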
+static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+                                enum mlx5e_vlan_rule_type rule_type,
+                                u16 vid, u32 *mc, u32 *mv)
+{
+       struct mlx5_flow_table *ft = priv->fts.vlan.t;
+       struct mlx5_flow_destination dest;
+       u8 match_criteria_enable = 0;
+       struct mlx5_flow_rule **rule_p;
+       int err = 0;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = priv->fts.main.t;
+
+       match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               rule_p = &priv->vlan.untagged_rule;
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               rule_p = &priv->vlan.any_vlan_rule;
+               MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+               break;
+       default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+               rule_p = &priv->vlan.active_vlans_rule[vid];
+               MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+               MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+               MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+               break;
+       }
+
+       *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+                                    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                    MLX5_FS_DEFAULT_FLOW_TAG,
+                                    &dest);
+
+       if (IS_ERR(*rule_p)) {
+               err = PTR_ERR(*rule_p);
+               *rule_p = NULL;
+               netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+       }
+
+       return err;
+}
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+                              enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       u32 *match_criteria;
+       u32 *match_value;
+       int err = 0;
+
+       match_value     = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       match_criteria  = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+       if (!match_value || !match_criteria) {
+               netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+               err = -ENOMEM;
+               goto add_vlan_rule_out;
+       }
+
+       if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+               mlx5e_vport_context_update_vlans(priv);
+
+       err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
+                                   match_value);
+
+add_vlan_rule_out:
+       kvfree(match_criteria);
+       kvfree(match_value);
+
+       return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+                               enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+       switch (rule_type) {
+       case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+               if (priv->vlan.untagged_rule) {
+                       mlx5_del_flow_rule(priv->vlan.untagged_rule);
+                       priv->vlan.untagged_rule = NULL;
+               }
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+               if (priv->vlan.any_vlan_rule) {
+                       mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
+                       priv->vlan.any_vlan_rule = NULL;
+               }
+               break;
+       case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+               mlx5e_vport_context_update_vlans(priv);
+               if (priv->vlan.active_vlans_rule[vid]) {
+                       mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
+                       priv->vlan.active_vlans_rule[vid] = NULL;
+               }
+               mlx5e_vport_context_update_vlans(priv);
+               break;
+       }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+       if (!priv->vlan.filter_disabled)
+               return;
+
+       priv->vlan.filter_disabled = false;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+       if (priv->vlan.filter_disabled)
+               return;
+
+       priv->vlan.filter_disabled = true;
+       if (priv->netdev->flags & IFF_PROMISC)
+               return;
+       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+                         u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       set_bit(vid, priv->vlan.active_vlans);
+
+       return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+                          u16 vid)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+
+       clear_bit(vid, priv->vlan.active_vlans);
+
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+       return 0;
+}
+
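+/* Iterate over every bucket of an address hash table; the _safe variant
+ * lets mlx5e_execute_action() delete nodes during the walk.
+ */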
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+       for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+               hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+                                struct mlx5e_eth_addr_hash_node *hn)
+{
+       switch (hn->action) {
+       case MLX5E_ACTION_ADD:
+               mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+               hn->action = MLX5E_ACTION_NONE;
+               break;
+
+       case MLX5E_ACTION_DEL:
+               mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+               mlx5e_del_eth_addr_from_hash(hn);
+               break;
+       }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct net_device *netdev = priv->netdev;
+       struct netdev_hw_addr *ha;
+
+       netif_addr_lock_bh(netdev);
+
+       mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+                                  priv->netdev->dev_addr);
+
+       netdev_for_each_uc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+       netdev_for_each_mc_addr(ha, netdev)
+               mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+       netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+                                 u8 addr_array[][ETH_ALEN], int size)
+{
+       bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
+       struct net_device *ndev = priv->netdev;
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_head *addr_list;
+       struct hlist_node *tmp;
+       int i = 0;
+       int hi;
+
+       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+
+       if (is_uc) /* Make sure our own address is pushed first */
+               ether_addr_copy(addr_array[i++], ndev->dev_addr);
+       else if (priv->eth_addr.broadcast_enabled)
+               ether_addr_copy(addr_array[i++], ndev->broadcast);
+
+       mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
+               if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
+                       continue;
+               if (i >= size)
+                       break;
+               ether_addr_copy(addr_array[i++], hn->ai.addr);
+       }
+}
+
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+                                                int list_type)
+{
+       bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
+       struct mlx5e_eth_addr_hash_node *hn;
+       u8 (*addr_array)[ETH_ALEN] = NULL;
+       struct hlist_head *addr_list;
+       struct hlist_node *tmp;
+       int max_size;
+       int size;
+       int err;
+       int hi;
+
+       size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+       max_size = is_uc ?
+               1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
+               1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+
+       addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+       mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
+               size++;
+
+       if (size > max_size) {
+               netdev_warn(priv->netdev,
+                           "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+                           is_uc ? "UC" : "MC", size, max_size);
+               size = max_size;
+       }
+
+       if (size) {
+               addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
+               if (!addr_array) {
+                       err = -ENOMEM;
+                       goto out;
+               }
+               mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+       }
+
+       err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+out:
+       if (err)
+               netdev_err(priv->netdev,
+                          "Failed to modify vport %s list err(%d)\n",
+                          is_uc ? "UC" : "MC", err);
+       kfree(addr_array);
+}
+
+static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+
+       mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
+       mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
+       mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+                                     ea->allmulti_enabled,
+                                     ea->promisc_enabled);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               mlx5e_execute_action(priv, hn);
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               mlx5e_execute_action(priv, hn);
+}
+
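+/* Mark all cached addresses for deletion, re-sync from the netdev (which
+ * flips still-present entries back to NONE/ADD), then apply the resulting
+ * add/delete actions.
+ */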
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+       struct mlx5e_eth_addr_hash_node *hn;
+       struct hlist_node *tmp;
+       int i;
+
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+               hn->action = MLX5E_ACTION_DEL;
+       mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+               hn->action = MLX5E_ACTION_DEL;
+
+       if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+               mlx5e_sync_netdev_addr(priv);
+
+       mlx5e_apply_netdev_addr(priv);
+}
+
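+/* Workqueue handler: diff the desired rx-mode (netdev flags) against the
+ * currently programmed state and add/remove the promisc, allmulti and
+ * broadcast rules accordingly.
+ */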
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              set_rx_mode_work);
+
+       struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+       struct net_device *ndev = priv->netdev;
+
+       bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+       bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+       bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+       bool broadcast_enabled = rx_mode_enable;
+
+       bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
+       bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
+       bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
+       bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
+       bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
+       bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
+
+       if (enable_promisc) {
+               mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+       }
+       if (enable_allmulti)
+               mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+       if (enable_broadcast)
+               mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+       mlx5e_handle_netdev_addr(priv);
+
+       if (disable_broadcast)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+       if (disable_allmulti)
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+       if (disable_promisc) {
+               if (!priv->vlan.filter_disabled)
+                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+                                           0);
+               mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+       }
+
+       ea->promisc_enabled   = promisc_enabled;
+       ea->allmulti_enabled  = allmulti_enabled;
+       ea->broadcast_enabled = broadcast_enabled;
+
+       mlx5e_vport_context_update(priv);
+}
+
+static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
+{
+       int i;
+
+       for (i = ft->num_groups - 1; i >= 0; i--) {
+               if (!IS_ERR_OR_NULL(ft->g[i]))
+                       mlx5_destroy_flow_group(ft->g[i]);
+               ft->g[i] = NULL;
+       }
+       ft->num_groups = 0;
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+       ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
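+/* Flow group sizes are powers of two and mirror the log_sz values used by
+ * the old mlx5_flow_table_group API (BIT(n) == 1 << n entries).
+ */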
+#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
+#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
+#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
+#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
+#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
+#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
+#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
+#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
+#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
+#define MLX5E_MAIN_TABLE_SIZE  (MLX5E_MAIN_GROUP0_SIZE +\
+                                MLX5E_MAIN_GROUP1_SIZE +\
+                                MLX5E_MAIN_GROUP2_SIZE +\
+                                MLX5E_MAIN_GROUP3_SIZE +\
+                                MLX5E_MAIN_GROUP4_SIZE +\
+                                MLX5E_MAIN_GROUP5_SIZE +\
+                                MLX5E_MAIN_GROUP6_SIZE +\
+                                MLX5E_MAIN_GROUP7_SIZE +\
+                                MLX5E_MAIN_GROUP8_SIZE)
+
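+/* Create the nine match groups of the main table in decreasing match
+ * strictness: first without a DMAC (ethertype + ip_protocol, ethertype,
+ * any), then with a full DMAC match, then matching the multicast bit
+ * only.
+ */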
+static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
+                                     int inlen)
+{
+       u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+       u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
+                               match_criteria.outer_headers.dmac_47_16);
+       int err;
+       int ix = 0;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP0_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP1_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP2_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+       eth_broadcast_addr(dmac);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP3_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       eth_broadcast_addr(dmac);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP4_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       eth_broadcast_addr(dmac);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP5_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+       dmac[0] = 0x01;
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP6_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+       dmac[0] = 0x01;
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP7_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       dmac[0] = 0x01;
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_MAIN_GROUP8_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       return 0;
+
+err_destroy_groups:
+       err = PTR_ERR(ft->g[ft->num_groups]);
+       ft->g[ft->num_groups] = NULL;
+       mlx5e_destroy_groups(ft);
+
+       return err;
+}
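
Each group stanza above repeats the same five steps: clear the inbox, program the match mask, set the start/end flow indices, create the group, and bump num_groups (one stanza, like the one consuming MLX5E_MAIN_GROUP2_SIZE, sets no criteria at all and acts as a catch-all). A table-driven loop is one way to express that pattern; the sketch below is illustrative only — struct main_group_desc and create_one_main_group are hypothetical helpers, and it assumes the same mlx5 headers and the MLX5_SET_CFG macro used above:

	struct main_group_desc {
		int	size;		/* flow entries in the group */
		u8	criteria;	/* MLX5_MATCH_OUTER_HEADERS or 0 */
		bool	match_ethertype;
		bool	match_ip_proto;
		u8	dmac_pattern;	/* 0xff: bcast mask, 0x01: mcast bit, 0: none */
	};

	static int create_one_main_group(struct mlx5e_flow_table *ft, u32 *in,
					 int inlen, int *ix,
					 const struct main_group_desc *d)
	{
		u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
		u8 *dmac = MLX5_ADDR_OF(fte_match_param, mc,
					outer_headers.dmac_47_16);

		memset(in, 0, inlen);
		MLX5_SET_CFG(in, match_criteria_enable, d->criteria);
		if (d->match_ethertype)
			MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
		if (d->match_ip_proto)
			MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
		if (d->dmac_pattern == 0xff)
			eth_broadcast_addr(dmac);
		else if (d->dmac_pattern)
			dmac[0] = d->dmac_pattern;
		MLX5_SET_CFG(in, start_flow_index, *ix);
		*ix += d->size;
		MLX5_SET_CFG(in, end_flow_index, *ix - 1);
		ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
		if (IS_ERR(ft->g[ft->num_groups]))
			return PTR_ERR(ft->g[ft->num_groups]);
		ft->num_groups++;
		return 0;
	}
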
+
+static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+{
+       u32 *in;
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       err = __mlx5e_create_main_groups(ft, in, inlen);
+
+       kvfree(in);
+       return err;
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5e_flow_table *ft = &priv->fts.main;
+       int err;
+
+       ft->num_groups = 0;
+       ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE);
+
+       if (IS_ERR(ft->t)) {
+               err = PTR_ERR(ft->t);
+               ft->t = NULL;
+               return err;
+       }
+       ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+       if (!ft->g) {
+               err = -ENOMEM;
+               goto err_destroy_main_flow_table;
+       }
+
+       err = mlx5e_create_main_groups(ft);
+       if (err)
+               goto err_free_g;
+       return 0;
+
+err_free_g:
+       kfree(ft->g);
+
+err_destroy_main_flow_table:
+       mlx5_destroy_flow_table(ft->t);
+       ft->t = NULL;
+
+       return err;
+}
+
+static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
+{
+       mlx5e_destroy_groups(ft);
+       kfree(ft->g);
+       mlx5_destroy_flow_table(ft->t);
+       ft->t = NULL;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5e_destroy_flow_table(&priv->fts.main);
+}
+
+#define MLX5E_NUM_VLAN_GROUPS  2
+#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
+#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+#define MLX5E_VLAN_TABLE_SIZE  (MLX5E_VLAN_GROUP0_SIZE +\
+                                MLX5E_VLAN_GROUP1_SIZE)
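
For sizing context (a reading of the defines above, not new semantics): group 0 reserves one entry per possible 12-bit VLAN ID, and group 1 presumably holds the two coarser rules — any-tagged and untagged — that match on vlan_tag alone, so

	MLX5E_VLAN_TABLE_SIZE = BIT(12) + BIT(1) = 4096 + 2 = 4098 entries
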
+
+static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
+                                     int inlen)
+{
+       int err;
+       int ix = 0;
+       u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_VLAN_GROUP0_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_VLAN_GROUP1_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
+       return 0;
+
+err_destroy_groups:
+       err = PTR_ERR(ft->g[ft->num_groups]);
+       ft->g[ft->num_groups] = NULL;
+       mlx5e_destroy_groups(ft);
+
+       return err;
+}
+
+static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+{
+       u32 *in;
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in)
+               return -ENOMEM;
+
+       err = __mlx5e_create_vlan_groups(ft, in, inlen);
+
+       kvfree(in);
+       return err;
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       struct mlx5e_flow_table *ft = &priv->fts.vlan;
+       int err;
+
+       ft->num_groups = 0;
+       ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE);
+
+       if (IS_ERR(ft->t)) {
+               err = PTR_ERR(ft->t);
+               ft->t = NULL;
+               return err;
+       }
+       ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+       if (!ft->g) {
+               err = -ENOMEM;
+               goto err_destroy_vlan_flow_table;
+       }
+
+       err = mlx5e_create_vlan_groups(ft);
+       if (err)
+               goto err_free_g;
+
+       return 0;
+
+err_free_g:
+       kfree(ft->g);
+
+err_destroy_vlan_flow_table:
+       mlx5_destroy_flow_table(ft->t);
+       ft->t = NULL;
+
+       return err;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+       mlx5e_destroy_flow_table(&priv->fts.vlan);
+}
+
+int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+{
+       int err;
+
+       priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+                                              MLX5_FLOW_NAMESPACE_KERNEL);
+
+       if (!priv->fts.ns)
+               return -EINVAL;
+
+       err = mlx5e_create_vlan_flow_table(priv);
+       if (err)
+               return err;
+
+       err = mlx5e_create_main_flow_table(priv);
+       if (err)
+               goto err_destroy_vlan_flow_table;
+
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       if (err)
+               goto err_destroy_main_flow_table;
+
+       return 0;
+
+err_destroy_main_flow_table:
+       mlx5e_destroy_main_flow_table(priv);
+err_destroy_vlan_flow_table:
+       mlx5e_destroy_vlan_flow_table(priv);
+
+       return err;
+}
+
+void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+{
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+       mlx5e_destroy_main_flow_table(priv);
+       mlx5e_destroy_vlan_flow_table(priv);
+}
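
Taken together, mlx5e_create_flow_tables() shows the consumption pattern of the new flow-steering API: resolve a namespace, create tables in it, add groups, then tear everything down in reverse order. A minimal sketch of that pattern, with error handling shortened — example_build_table is hypothetical, "in" is assumed to be a prebuilt create_flow_group_in buffer, and the mlx5 fs headers above are assumed:

	static int example_build_table(struct mlx5_core_dev *mdev, u32 *in)
	{
		struct mlx5_flow_namespace *ns;
		struct mlx5_flow_table *t;
		struct mlx5_flow_group *g;

		ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_KERNEL);
		if (!ns)
			return -EINVAL;

		t = mlx5_create_flow_table(ns, 0, 16);	/* prio 0, 16 entries */
		if (IS_ERR(t))
			return PTR_ERR(t);

		g = mlx5_create_flow_group(t, in);
		if (IS_ERR(g)) {
			mlx5_destroy_flow_table(t);
			return PTR_ERR(g);
		}

		/* ... add flow table entries here ... */

		mlx5_destroy_flow_group(g);		/* teardown in reverse */
		mlx5_destroy_flow_table(t);
		return 0;
	}
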
index d67058afe87ec96be19db7f43d6b3bb22006e7a6..d4601a5646997d07704379d6359e389f1040dc8c 100644 (file)
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
 #include "en.h"
 #include "eswitch.h"
 
@@ -2103,6 +2103,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+       if (is_zero_ether_addr(netdev->dev_addr) &&
+           !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
+               eth_hw_addr_random(netdev);
+               mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+       }
 }
 
 static void mlx5e_build_netdev(struct net_device *netdev)
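
The hunk above adds a fallback for functions that come up without a provisioned MAC: if the queried address is all zeroes and the function is not the vport group manager, a random address is assigned. For reference, the core etherdevice helper used here produces a valid station address like this (a note on eth_random_addr(), which eth_hw_addr_random() builds on; not part of the patch):

	/* addr[0] &= 0xfe;   clear the multicast bit
	 * addr[0] |= 0x02;   set the locally-administered bit
	 * remaining bytes are random; addr_assign_type becomes NET_ADDR_RANDOM */
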
index d8939e597c546563b42afed77beba8c3b1ca8958..bc3d9f8a75c1d86532d9b579b4b8c6f8a36f9f46 100644 (file)
@@ -34,7 +34,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include <linux/mlx5/vport.h>
-#include <linux/mlx5/flow_table.h>
+#include <linux/mlx5/fs.h>
 #include "mlx5_core.h"
 #include "eswitch.h"
 
@@ -321,220 +321,6 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
        free_l2_table_index(l2_table, index);
 }
 
-/* E-Switch FDB flow steering */
-struct dest_node {
-       struct list_head list;
-       struct mlx5_flow_destination dest;
-};
-
-static int _mlx5_flow_rule_apply(struct mlx5_flow_rule *fr)
-{
-       bool was_valid = fr->valid;
-       struct dest_node *dest_n;
-       u32 dest_list_size = 0;
-       void *in_match_value;
-       u32 *flow_context;
-       u32 flow_index;
-       int err;
-       int i;
-
-       if (list_empty(&fr->dest_list)) {
-               if (fr->valid)
-                       mlx5_del_flow_table_entry(fr->ft, fr->fi);
-               fr->valid = false;
-               return 0;
-       }
-
-       list_for_each_entry(dest_n, &fr->dest_list, list)
-               dest_list_size++;
-
-       flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
-                                   MLX5_ST_SZ_BYTES(dest_format_struct) *
-                                   dest_list_size);
-       if (!flow_context)
-               return -ENOMEM;
-
-       MLX5_SET(flow_context, flow_context, flow_tag, fr->flow_tag);
-       MLX5_SET(flow_context, flow_context, action, fr->action);
-       MLX5_SET(flow_context, flow_context, destination_list_size,
-                dest_list_size);
-
-       i = 0;
-       list_for_each_entry(dest_n, &fr->dest_list, list) {
-               void *dest_addr = MLX5_ADDR_OF(flow_context, flow_context,
-                                              destination[i++]);
-
-               MLX5_SET(dest_format_struct, dest_addr, destination_type,
-                        dest_n->dest.type);
-               MLX5_SET(dest_format_struct, dest_addr, destination_id,
-                        dest_n->dest.vport_num);
-       }
-
-       in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-       memcpy(in_match_value, fr->match_value, MLX5_ST_SZ_BYTES(fte_match_param));
-
-       err = mlx5_add_flow_table_entry(fr->ft, fr->match_criteria_enable,
-                                       fr->match_criteria, flow_context,
-                                       &flow_index);
-       if (!err) {
-               if (was_valid)
-                       mlx5_del_flow_table_entry(fr->ft, fr->fi);
-               fr->fi = flow_index;
-               fr->valid = true;
-       }
-       kfree(flow_context);
-       return err;
-}
-
-static int mlx5_flow_rule_add_dest(struct mlx5_flow_rule *fr,
-                                  struct mlx5_flow_destination *new_dest)
-{
-       struct dest_node *dest_n;
-       int err;
-
-       dest_n = kzalloc(sizeof(*dest_n), GFP_KERNEL);
-       if (!dest_n)
-               return -ENOMEM;
-
-       memcpy(&dest_n->dest, new_dest, sizeof(dest_n->dest));
-       mutex_lock(&fr->mutex);
-       list_add(&dest_n->list, &fr->dest_list);
-       err = _mlx5_flow_rule_apply(fr);
-       if (err) {
-               list_del(&dest_n->list);
-               kfree(dest_n);
-       }
-       mutex_unlock(&fr->mutex);
-       return err;
-}
-
-static int mlx5_flow_rule_del_dest(struct mlx5_flow_rule *fr,
-                                  struct mlx5_flow_destination *dest)
-{
-       struct dest_node *dest_n;
-       struct dest_node *n;
-       int err;
-
-       mutex_lock(&fr->mutex);
-       list_for_each_entry_safe(dest_n, n, &fr->dest_list, list) {
-               if (dest->vport_num == dest_n->dest.vport_num)
-                       goto found;
-       }
-       mutex_unlock(&fr->mutex);
-       return -ENOENT;
-
-found:
-       list_del(&dest_n->list);
-       err = _mlx5_flow_rule_apply(fr);
-       mutex_unlock(&fr->mutex);
-       kfree(dest_n);
-
-       return err;
-}
-
-static struct mlx5_flow_rule *find_fr(struct mlx5_eswitch *esw,
-                                     u8 match_criteria_enable,
-                                     u32 *match_value)
-{
-       struct hlist_head *hash = esw->mc_table;
-       struct esw_mc_addr *esw_mc;
-       u8 *dmac_v;
-
-       dmac_v = MLX5_ADDR_OF(fte_match_param, match_value,
-                             outer_headers.dmac_47_16);
-
-       /* UNICAST FULL MATCH */
-       if (!is_multicast_ether_addr(dmac_v))
-               return NULL;
-
-       /* MULTICAST FULL MATCH */
-       esw_mc = l2addr_hash_find(hash, dmac_v, struct esw_mc_addr);
-
-       return esw_mc ? esw_mc->uplink_rule : NULL;
-}
-
-static struct mlx5_flow_rule *alloc_fr(void *ft,
-                                      u8 match_criteria_enable,
-                                      u32 *match_criteria,
-                                      u32 *match_value,
-                                      u32 action,
-                                      u32 flow_tag)
-{
-       struct mlx5_flow_rule *fr = kzalloc(sizeof(*fr), GFP_KERNEL);
-
-       if (!fr)
-               return NULL;
-
-       fr->match_criteria = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-       fr->match_value = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-       if (!fr->match_criteria || !fr->match_value) {
-               kfree(fr->match_criteria);
-               kfree(fr->match_value);
-               kfree(fr);
-               return NULL;
-       }
-
-       memcpy(fr->match_criteria, match_criteria, MLX5_ST_SZ_BYTES(fte_match_param));
-       memcpy(fr->match_value, match_value, MLX5_ST_SZ_BYTES(fte_match_param));
-       fr->match_criteria_enable = match_criteria_enable;
-       fr->flow_tag = flow_tag;
-       fr->action = action;
-
-       mutex_init(&fr->mutex);
-       INIT_LIST_HEAD(&fr->dest_list);
-       atomic_set(&fr->refcount, 0);
-       fr->ft = ft;
-       return fr;
-}
-
-static void deref_fr(struct mlx5_flow_rule *fr)
-{
-       if (!atomic_dec_and_test(&fr->refcount))
-               return;
-
-       kfree(fr->match_criteria);
-       kfree(fr->match_value);
-       kfree(fr);
-}
-
-static struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_eswitch *esw,
-                  u8 match_criteria_enable,
-                  u32 *match_criteria,
-                  u32 *match_value,
-                  u32 action,
-                  u32 flow_tag,
-                  struct mlx5_flow_destination *dest)
-{
-       struct mlx5_flow_rule *fr;
-       int err;
-
-       fr = find_fr(esw, match_criteria_enable, match_value);
-       fr = fr ? fr : alloc_fr(esw->fdb_table.fdb, match_criteria_enable, match_criteria,
-                               match_value, action, flow_tag);
-       if (!fr)
-               return NULL;
-
-       atomic_inc(&fr->refcount);
-
-       err = mlx5_flow_rule_add_dest(fr, dest);
-       if (err) {
-               deref_fr(fr);
-               return NULL;
-       }
-
-       return fr;
-}
-
-static void mlx5_del_flow_rule(struct mlx5_flow_rule *fr, u32 vport)
-{
-       struct mlx5_flow_destination dest;
-
-       dest.vport_num = vport;
-       mlx5_flow_rule_del_dest(fr, &dest);
-       deref_fr(fr);
-}
-
 /* E-Switch FDB */
 static struct mlx5_flow_rule *
 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
@@ -569,7 +355,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
                  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
                  dmac_v, dmac_c, vport);
        flow_rule =
-               mlx5_add_flow_rule(esw,
+               mlx5_add_flow_rule(esw->fdb_table.fdb,
                                   match_header,
                                   match_c,
                                   match_v,
@@ -589,33 +375,61 @@ out:
 
 static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 {
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
-       struct mlx5_flow_table_group g;
+       struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb;
+       struct mlx5_flow_group *g;
+       void *match_criteria;
+       int table_size;
+       u32 *flow_group_in;
        u8 *dmac;
+       int err = 0;
 
        esw_debug(dev, "Create FDB log_max_size(%d)\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 
-       memset(&g, 0, sizeof(g));
-       /* UC MC Full match rules*/
-       g.log_sz = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-       g.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       dmac = MLX5_ADDR_OF(fte_match_param, g.match_criteria,
-                           outer_headers.dmac_47_16);
-       /* Match criteria mask */
-       memset(dmac, 0xff, 6);
+       root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get FDB flow namespace\n");
+               return -EOPNOTSUPP;
+       }
 
-       fdb = mlx5_create_flow_table(dev, 0,
-                                    MLX5_FLOW_TABLE_TYPE_ESWITCH,
-                                    1, &g);
-       if (fdb)
-               esw_debug(dev, "ESW: FDB Table created fdb->id %d\n", mlx5_get_flow_table_id(fdb));
-       else
-               esw_warn(dev, "ESW: Failed to create FDB Table\n");
+       flow_group_in = mlx5_vzalloc(inlen);
+       if (!flow_group_in)
+               return -ENOMEM;
+
+       table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+       fdb = mlx5_create_flow_table(root_ns, 0, table_size);
+       if (IS_ERR_OR_NULL(fdb)) {
+               err = fdb ? PTR_ERR(fdb) : -ENOMEM;
+               esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+               goto out;
+       }
 
+       MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+                MLX5_MATCH_OUTER_HEADERS);
+       match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+       dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+       eth_broadcast_addr(dmac);
+
+       g = mlx5_create_flow_group(fdb, flow_group_in);
+       if (IS_ERR_OR_NULL(g)) {
+               err = g ? PTR_ERR(g) : -ENOMEM;
+               esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+               goto out;
+       }
+
+       esw->fdb_table.addr_grp = g;
        esw->fdb_table.fdb = fdb;
-       return fdb ? 0 : -ENOMEM;
+out:
+       kvfree(flow_group_in);
+       if (err && !IS_ERR_OR_NULL(fdb))
+               mlx5_destroy_flow_table(fdb);
+       return err;
 }
 
 static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
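
One detail worth noting in esw_create_fdb_table() above: the FDB is sized to the hardware maximum advertised in the ESW flow-table capabilities, and a single address group spans the whole table with a full 48-bit DMAC mask. With an illustrative (device-dependent) capability value:

	/* if log_max_ft_size == 16:
	 *   table_size = BIT(16) = 65536 flow entries,
	 *   one group covering start_flow_index 0 .. end_flow_index 65535 */
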
@@ -623,10 +437,11 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
        if (!esw->fdb_table.fdb)
                return;
 
-       esw_debug(esw->dev, "Destroy FDB Table fdb(%d)\n",
-                 mlx5_get_flow_table_id(esw->fdb_table.fdb));
+       esw_debug(esw->dev, "Destroy FDB Table\n");
+       mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
        esw->fdb_table.fdb = NULL;
+       esw->fdb_table.addr_grp = NULL;
 }
 
 /* E-Switch vport UC/MC lists management */
@@ -689,7 +504,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
        del_l2_table_entry(esw->dev, esw_uc->table_index);
 
        if (vaddr->flow_rule)
-               mlx5_del_flow_rule(vaddr->flow_rule, vport);
+               mlx5_del_flow_rule(vaddr->flow_rule);
        vaddr->flow_rule = NULL;
 
        l2addr_hash_del(esw_uc);
@@ -750,14 +565,14 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
                  esw_mc->uplink_rule);
 
        if (vaddr->flow_rule)
-               mlx5_del_flow_rule(vaddr->flow_rule, vport);
+               mlx5_del_flow_rule(vaddr->flow_rule);
        vaddr->flow_rule = NULL;
 
        if (--esw_mc->refcnt)
                return 0;
 
        if (esw_mc->uplink_rule)
-               mlx5_del_flow_rule(esw_mc->uplink_rule, UPLINK_VPORT);
+               mlx5_del_flow_rule(esw_mc->uplink_rule);
 
        l2addr_hash_del(esw_mc);
        return 0;
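
The eswitch hunks above also capture an API simplification: the per-rule destination list and its refcounting now live inside the flow-steering core, so rule deletion no longer needs the caller to say which vport to drop. Schematically:

	/* old: caller tracked which destination to remove */
	mlx5_del_flow_rule(vaddr->flow_rule, vport);

	/* new: the rule owns its destination bookkeeping */
	mlx5_del_flow_rule(vaddr->flow_rule);
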
index 02ff3eade026052d6edbabacbec99410b3468847..3416a428f70fe0766af3c9854b74e4ef00ce7d4b 100644 (file)
@@ -88,20 +88,6 @@ struct l2addr_node {
        kfree(ptr);                                         \
 })
 
-struct mlx5_flow_rule {
-       void             *ft;
-       u32              fi;
-       u8               match_criteria_enable;
-       u32              *match_criteria;
-       u32              *match_value;
-       u32              action;
-       u32              flow_tag;
-       bool             valid;
-       atomic_t         refcount;
-       struct mutex     mutex; /* protect flow rule updates */
-       struct list_head dest_list;
-};
-
 struct mlx5_vport {
        struct mlx5_core_dev    *dev;
        int                     vport;
@@ -126,6 +112,7 @@ struct mlx5_l2_table {
 
 struct mlx5_eswitch_fdb {
        void *fdb;
+       struct mlx5_flow_group *addr_grp;
 };
 
 struct mlx5_eswitch {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
deleted file mode 100644 (file)
index ca90b9b..0000000
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/flow_table.h>
-#include "mlx5_core.h"
-
-struct mlx5_ftg {
-       struct mlx5_flow_table_group    g;
-       u32                             id;
-       u32                             start_ix;
-};
-
-struct mlx5_flow_table {
-       struct mlx5_core_dev    *dev;
-       u8                      level;
-       u8                      type;
-       u32                     id;
-       struct mutex            mutex; /* sync bitmap alloc */
-       u16                     num_groups;
-       struct mlx5_ftg         *group;
-       unsigned long           *bitmap;
-       u32                     size;
-};
-
-static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
-                                  u32 flow_index, void *flow_context)
-{
-       u32 out[MLX5_ST_SZ_DW(set_fte_out)];
-       u32 *in;
-       void *in_flow_context;
-       int fcdls =
-               MLX5_GET(flow_context, flow_context, destination_list_size) *
-               MLX5_ST_SZ_BYTES(dest_format_struct);
-       int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
-       int err;
-
-       in = mlx5_vzalloc(inlen);
-       if (!in) {
-               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
-               return -ENOMEM;
-       }
-
-       MLX5_SET(set_fte_in, in, table_type, ft->type);
-       MLX5_SET(set_fte_in, in, table_id,   ft->id);
-       MLX5_SET(set_fte_in, in, flow_index, flow_index);
-       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-
-       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-       memcpy(in_flow_context, flow_context,
-              MLX5_ST_SZ_BYTES(flow_context) + fcdls);
-
-       MLX5_SET(flow_context, in_flow_context, group_id,
-                ft->group[group_ix].id);
-
-       memset(out, 0, sizeof(out));
-       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
-                                        sizeof(out));
-       kvfree(in);
-
-       return err;
-}
-
-static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
-{
-       u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
-       u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
-
-       memset(in, 0, sizeof(in));
-       memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
-       MLX5_SET_DFTEI(in, table_type, ft->type);
-       MLX5_SET_DFTEI(in, table_id,   ft->id);
-       MLX5_SET_DFTEI(in, flow_index, flow_index);
-       MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-
-       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
-       u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
-
-       memset(in, 0, sizeof(in));
-       memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
-       MLX5_SET_DFGI(in, table_type, ft->type);
-       MLX5_SET_DFGI(in, table_id,   ft->id);
-       MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
-       MLX5_SET_DFGI(in, group_id, ft->group[i].id);
-       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
-{
-       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
-       u32 *in;
-       void *in_match_criteria;
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       struct mlx5_flow_table_group *g = &ft->group[i].g;
-       u32 start_ix = ft->group[i].start_ix;
-       u32 end_ix = start_ix + (1 << g->log_sz) - 1;
-       int err;
-
-       in = mlx5_vzalloc(inlen);
-       if (!in) {
-               mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
-               return -ENOMEM;
-       }
-       in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
-                                        match_criteria);
-
-       memset(out, 0, sizeof(out));
-
-#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
-       MLX5_SET_CFGI(in, table_type,            ft->type);
-       MLX5_SET_CFGI(in, table_id,              ft->id);
-       MLX5_SET_CFGI(in, opcode,                MLX5_CMD_OP_CREATE_FLOW_GROUP);
-       MLX5_SET_CFGI(in, start_flow_index,      start_ix);
-       MLX5_SET_CFGI(in, end_flow_index,        end_ix);
-       MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
-
-       memcpy(in_match_criteria, g->match_criteria,
-              MLX5_ST_SZ_BYTES(fte_match_param));
-
-       err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
-                                        sizeof(out));
-       if (!err)
-               ft->group[i].id = MLX5_GET(create_flow_group_out, out,
-                                          group_id);
-
-       kvfree(in);
-
-       return err;
-}
-
-static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
-{
-       int i;
-
-       for (i = 0; i < ft->num_groups; i++)
-               mlx5_destroy_flow_group_cmd(ft, i);
-}
-
-static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
-{
-       int err;
-       int i;
-
-       for (i = 0; i < ft->num_groups; i++) {
-               err = mlx5_create_flow_group_cmd(ft, i);
-               if (err)
-                       goto err_destroy_flow_table_groups;
-       }
-
-       return 0;
-
-err_destroy_flow_table_groups:
-       for (i--; i >= 0; i--)
-               mlx5_destroy_flow_group_cmd(ft, i);
-
-       return err;
-}
-
-static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
-{
-       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
-       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
-       int err;
-
-       memset(in, 0, sizeof(in));
-
-       MLX5_SET(create_flow_table_in, in, table_type, ft->type);
-       MLX5_SET(create_flow_table_in, in, level,      ft->level);
-       MLX5_SET(create_flow_table_in, in, log_size,   order_base_2(ft->size));
-
-       MLX5_SET(create_flow_table_in, in, opcode,
-                MLX5_CMD_OP_CREATE_FLOW_TABLE);
-
-       memset(out, 0, sizeof(out));
-       err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
-                                        sizeof(out));
-       if (err)
-               return err;
-
-       ft->id = MLX5_GET(create_flow_table_out, out, table_id);
-
-       return 0;
-}
-
-static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
-       u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
-
-       memset(in, 0, sizeof(in));
-       memset(out, 0, sizeof(out));
-
-#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
-       MLX5_SET_DFTI(in, table_type, ft->type);
-       MLX5_SET_DFTI(in, table_id,   ft->id);
-       MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-
-       mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
-                          u32 *match_criteria, int *group_ix)
-{
-       void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
-                                     outer_headers);
-       void *mc_misc  = MLX5_ADDR_OF(fte_match_param, match_criteria,
-                                     misc_parameters);
-       void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
-                                     inner_headers);
-       int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
-       int mc_misc_sz  = MLX5_ST_SZ_BYTES(fte_match_set_misc);
-       int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
-       int i;
-
-       for (i = 0; i < ft->num_groups; i++) {
-               struct mlx5_flow_table_group *g = &ft->group[i].g;
-               void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
-                                              g->match_criteria,
-                                              outer_headers);
-               void *gmc_misc  = MLX5_ADDR_OF(fte_match_param,
-                                              g->match_criteria,
-                                              misc_parameters);
-               void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
-                                              g->match_criteria,
-                                              inner_headers);
-
-               if (g->match_criteria_enable != match_criteria_enable)
-                       continue;
-
-               if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
-                       if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
-                               continue;
-
-               if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
-                       if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
-                               continue;
-
-               if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
-                       if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
-                               continue;
-
-               *group_ix = i;
-               return 0;
-       }
-
-       return -EINVAL;
-}
-
-static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
-{
-       struct mlx5_ftg *g = &ft->group[group_ix];
-       int err = 0;
-
-       mutex_lock(&ft->mutex);
-
-       *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
-       if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
-               err = -ENOSPC;
-       else
-               __set_bit(*ix, ft->bitmap);
-
-       mutex_unlock(&ft->mutex);
-
-       return err;
-}
-
-static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
-{
-       __clear_bit(ix, ft->bitmap);
-}
-
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
-                             void *match_criteria, void *flow_context,
-                             u32 *flow_index)
-{
-       struct mlx5_flow_table *ft = flow_table;
-       int group_ix;
-       int err;
-
-       err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
-                             &group_ix);
-       if (err) {
-               mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
-               return err;
-       }
-
-       err = alloc_flow_index(ft, group_ix, flow_index);
-       if (err) {
-               mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
-               return err;
-       }
-
-       return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
-}
-EXPORT_SYMBOL(mlx5_add_flow_table_entry);
-
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
-{
-       struct mlx5_flow_table *ft = flow_table;
-
-       mlx5_del_flow_entry_cmd(ft, flow_index);
-       mlx5_free_flow_index(ft, flow_index);
-}
-EXPORT_SYMBOL(mlx5_del_flow_table_entry);
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
-                            u16 num_groups,
-                            struct mlx5_flow_table_group *group)
-{
-       struct mlx5_flow_table *ft;
-       u32 start_ix = 0;
-       u32 ft_size = 0;
-       void *gr;
-       void *bm;
-       int err;
-       int i;
-
-       for (i = 0; i < num_groups; i++)
-               ft_size += (1 << group[i].log_sz);
-
-       ft = kzalloc(sizeof(*ft), GFP_KERNEL);
-       gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
-       bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
-       if (!ft || !gr || !bm)
-               goto err_free_ft;
-
-       ft->group       = gr;
-       ft->bitmap      = bm;
-       ft->num_groups  = num_groups;
-       ft->level       = level;
-       ft->type        = table_type;
-       ft->size        = ft_size;
-       ft->dev         = dev;
-       mutex_init(&ft->mutex);
-
-       for (i = 0; i < ft->num_groups; i++) {
-               memcpy(&ft->group[i].g, &group[i], sizeof(*group));
-               ft->group[i].start_ix = start_ix;
-               start_ix += 1 << group[i].log_sz;
-       }
-
-       err = mlx5_create_flow_table_cmd(ft);
-       if (err)
-               goto err_free_ft;
-
-       err = mlx5_create_flow_table_groups(ft);
-       if (err)
-               goto err_destroy_flow_table_cmd;
-
-       return ft;
-
-err_destroy_flow_table_cmd:
-       mlx5_destroy_flow_table_cmd(ft);
-
-err_free_ft:
-       mlx5_core_warn(dev, "failed to alloc flow table\n");
-       kfree(bm);
-       kfree(gr);
-       kfree(ft);
-
-       return NULL;
-}
-EXPORT_SYMBOL(mlx5_create_flow_table);
-
-void mlx5_destroy_flow_table(void *flow_table)
-{
-       struct mlx5_flow_table *ft = flow_table;
-
-       mlx5_destroy_flow_table_groups(ft);
-       mlx5_destroy_flow_table_cmd(ft);
-       kfree(ft->bitmap);
-       kfree(ft->group);
-       kfree(ft);
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_table);
-
-u32 mlx5_get_flow_table_id(void *flow_table)
-{
-       struct mlx5_flow_table *ft = flow_table;
-
-       return ft->id;
-}
-EXPORT_SYMBOL(mlx5_get_flow_table_id);
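
The removed flow_table.c exported a device-scoped API in which every group — and hence the whole table layout — had to be declared at creation time; its fs_core replacement is namespace-scoped and adds groups to an existing table. Side by side (argument names illustrative):

	/* old (deleted above): layout fixed at creation */
	ft = mlx5_create_flow_table(dev, level, table_type,
				    num_groups, group_array);

	/* new (fs_core): create the table, then add groups to it */
	ft = mlx5_create_flow_table(ns, prio, table_size);
	g  = mlx5_create_flow_group(ft, flow_group_in);
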
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
new file mode 100644 (file)
index 0000000..5096f4f
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "mlx5_core.h"
+
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+                              enum fs_flow_table_type type, unsigned int level,
+                              unsigned int log_size, unsigned int *table_id)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(create_flow_table_in, in, opcode,
+                MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+       MLX5_SET(create_flow_table_in, in, table_type, type);
+       MLX5_SET(create_flow_table_in, in, level, level);
+       MLX5_SET(create_flow_table_in, in, log_size, log_size);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                        sizeof(out));
+
+       if (!err)
+               *table_id = MLX5_GET(create_flow_table_out, out,
+                                    table_id);
+       return err;
+}
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+                               struct mlx5_flow_table *ft)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(destroy_flow_table_in, in, opcode,
+                MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+       MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
+       MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+                              struct mlx5_flow_table *ft,
+                              u32 *in,
+                              unsigned int *group_id)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+       int err;
+
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(create_flow_group_in, in, opcode,
+                MLX5_CMD_OP_CREATE_FLOW_GROUP);
+       MLX5_SET(create_flow_group_in, in, table_type, ft->type);
+       MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+
+       err = mlx5_cmd_exec_check_status(dev, in,
+                                        inlen, out,
+                                        sizeof(out));
+       if (!err)
+               *group_id = MLX5_GET(create_flow_group_out, out,
+                                    group_id);
+
+       return err;
+}
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+                               struct mlx5_flow_table *ft,
+                               unsigned int group_id)
+{
+       u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(destroy_flow_group_in, in, opcode,
+                MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+       MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
+       MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
+       MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+                                         sizeof(out));
+}
+
+static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+                           int opmod, int modify_mask,
+                           struct mlx5_flow_table *ft,
+                           unsigned int group_id,
+                           struct fs_fte *fte)
+{
+       unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+               fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+       struct mlx5_flow_rule *dst;
+       void *in_flow_context;
+       void *in_match_value;
+       void *in_dests;
+       u32 *in;
+       int err;
+
+       in = mlx5_vzalloc(inlen);
+       if (!in) {
+               mlx5_core_warn(dev, "failed to allocate inbox\n");
+               return -ENOMEM;
+       }
+
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+       MLX5_SET(set_fte_in, in, op_mod, opmod);
+       MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+       MLX5_SET(set_fte_in, in, table_type, ft->type);
+       MLX5_SET(set_fte_in, in, table_id,   ft->id);
+       MLX5_SET(set_fte_in, in, flow_index, fte->index);
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+       MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
+       MLX5_SET(flow_context, in_flow_context, action, fte->action);
+       MLX5_SET(flow_context, in_flow_context, destination_list_size,
+                fte->dests_size);
+       in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+                                     match_value);
+       memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+
+       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+       list_for_each_entry(dst, &fte->node.children, node.list) {
+               unsigned int id;
+
+               MLX5_SET(dest_format_struct, in_dests, destination_type,
+                        dst->dest_attr.type);
+               if (dst->dest_attr.type ==
+                   MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+                       id = dst->dest_attr.ft->id;
+               else
+                       id = dst->dest_attr.tir_num;
+               MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+               in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+       }
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
+                                        sizeof(out));
+       kvfree(in);
+
+       return err;
+}
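
Two buffer-sizing idioms recur throughout fs_cmd.c: fixed-layout commands use stack arrays sized in 32-bit words via MLX5_ST_SZ_DW, while mlx5_cmd_set_fte() above must allocate, because its inbox grows by one dest_format_struct per destination. Side by side:

	/* fixed-size command: stack buffer, sized in dwords */
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];

	/* variable-size command: heap buffer, sized in bytes */
	inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
	in = mlx5_vzalloc(inlen);
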
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int group_id,
+                       struct fs_fte *fte)
+{
+       return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+}
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int group_id,
+                       struct fs_fte *fte)
+{
+       int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
+                                               flow_table_properties_nic_receive.
+                                               flow_modify_en);
+       int modify_mask;
+       int opmod;
+
+       if (!atomic_mod_cap)
+               return -EOPNOTSUPP;
+       opmod = 1;
+       modify_mask = 1 <<
+               MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
+
+       return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
+}
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int index)
+{
+       u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+       u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+       MLX5_SET(delete_fte_in, in, table_type, ft->type);
+       MLX5_SET(delete_fte_in, in, table_id, ft->id);
+       MLX5_SET(delete_fte_in, in, flow_index, index);
+
+       return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
new file mode 100644 (file)
index 0000000..f39304e
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CMD_
+#define _MLX5_FS_CMD_
+
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+                              enum fs_flow_table_type type, unsigned int level,
+                              unsigned int log_size, unsigned int *table_id);
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+                               struct mlx5_flow_table *ft);
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+                              struct mlx5_flow_table *ft,
+                              u32 *in, unsigned int *group_id);
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+                               struct mlx5_flow_table *ft,
+                               unsigned int group_id);
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int group_id,
+                       struct fs_fte *fte);
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int group_id,
+                       struct fs_fte *fte);
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+                       struct mlx5_flow_table *ft,
+                       unsigned int index);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
new file mode 100644 (file)
index 0000000..f7d62fe
--- /dev/null
@@ -0,0 +1,1047 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
+                                        sizeof(struct init_tree_node))
+
+#define INIT_PRIO(min_level_val, max_ft_val,\
+                 start_level_val, ...) {.type = FS_TYPE_PRIO,\
+       .min_ft_level = min_level_val,\
+       .start_level = start_level_val,\
+       .max_ft = max_ft_val,\
+       .children = (struct init_tree_node[]) {__VA_ARGS__},\
+       .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define ADD_PRIO(min_level_val, max_ft_val, start_level_val, ...)\
+       INIT_PRIO(min_level_val, max_ft_val, start_level_val,\
+                 __VA_ARGS__)
+
+#define ADD_FT_PRIO(max_ft_val, start_level_val, ...)\
+       INIT_PRIO(0, max_ft_val, start_level_val,\
+                 __VA_ARGS__)
+
+#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+       .children = (struct init_tree_node[]) {__VA_ARGS__},\
+       .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define KERNEL_START_LEVEL 0
+#define KERNEL_P0_START_LEVEL KERNEL_START_LEVEL
+#define KERNEL_MAX_FT 2
+#define KERNEL_MIN_LEVEL 2
+static struct init_tree_node {
+       enum fs_node_type       type;
+       struct init_tree_node *children;
+       int ar_size;
+       int min_ft_level;
+       int prio;
+       int max_ft;
+       int start_level;
+} root_fs = {
+       .type = FS_TYPE_NAMESPACE,
+       .ar_size = 1,
+       .children = (struct init_tree_node[]) {
+               ADD_PRIO(KERNEL_MIN_LEVEL, KERNEL_MAX_FT,
+                        KERNEL_START_LEVEL,
+                        ADD_NS(ADD_FT_PRIO(KERNEL_MAX_FT,
+                                           KERNEL_P0_START_LEVEL))),
+       }
+};
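
Expanding the initializer macros by hand may help when reading root_fs; for the kernel namespace above it appears to describe the following static tree (illustration only):

	root_fs (FS_TYPE_NAMESPACE)
	    prio: min_ft_level = 2, max_ft = 2, start_level = 0
	        namespace
	            ft prio: min_ft_level = 0, max_ft = 2, start_level = 0
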
+
+static void del_rule(struct fs_node *node);
+static void del_flow_table(struct fs_node *node);
+static void del_flow_group(struct fs_node *node);
+static void del_fte(struct fs_node *node);
+
+static void tree_init_node(struct fs_node *node,
+                          unsigned int refcount,
+                          void (*remove_func)(struct fs_node *))
+{
+       atomic_set(&node->refcount, refcount);
+       INIT_LIST_HEAD(&node->list);
+       INIT_LIST_HEAD(&node->children);
+       mutex_init(&node->lock);
+       node->remove_func = remove_func;
+}
+
+static void tree_add_node(struct fs_node *node, struct fs_node *parent)
+{
+       if (parent)
+               atomic_inc(&parent->refcount);
+       node->parent = parent;
+
+       /* Parent is the root */
+       if (!parent)
+               node->root = node;
+       else
+               node->root = parent->root;
+}
+
+static void tree_get_node(struct fs_node *node)
+{
+       atomic_inc(&node->refcount);
+}
+
+static void nested_lock_ref_node(struct fs_node *node)
+{
+       if (node) {
+               mutex_lock_nested(&node->lock, SINGLE_DEPTH_NESTING);
+               atomic_inc(&node->refcount);
+       }
+}
+
+static void lock_ref_node(struct fs_node *node)
+{
+       if (node) {
+               mutex_lock(&node->lock);
+               atomic_inc(&node->refcount);
+       }
+}
+
+static void unlock_ref_node(struct fs_node *node)
+{
+       if (node) {
+               atomic_dec(&node->refcount);
+               mutex_unlock(&node->lock);
+       }
+}
+
+static void tree_put_node(struct fs_node *node)
+{
+       struct fs_node *parent_node = node->parent;
+
+       lock_ref_node(parent_node);
+       if (atomic_dec_and_test(&node->refcount)) {
+               if (parent_node)
+                       list_del_init(&node->list);
+               if (node->remove_func)
+                       node->remove_func(node);
+               kfree(node);
+               node = NULL;
+       }
+       unlock_ref_node(parent_node);
+       if (!node && parent_node)
+               tree_put_node(parent_node);
+}
+
+static int tree_remove_node(struct fs_node *node)
+{
+       if (atomic_read(&node->refcount) > 1)
+               return -EPERM;
+       tree_put_node(node);
+       return 0;
+}
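
Every flow-steering object embeds an fs_node, and the helpers above implement a parent-referencing refcount tree: tree_add_node() pins the parent, the final tree_put_node() runs the node's remove_func and then releases the parent, and tree_remove_node() refuses to delete anything still shared. A sketch of the intended pairing, assuming an object created with an initial reference of one:

	tree_init_node(&ft->node, 1, del_flow_table); /* refcount = 1  */
	tree_add_node(&ft->node, &prio->node);        /* pins the prio */

	tree_get_node(&ft->node);                     /* temporary user */
	/* ... use the table ... */
	tree_put_node(&ft->node);                     /* drop user ref  */

	tree_remove_node(&ft->node);   /* -EPERM if references remain */
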
+
+static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
+                                unsigned int prio)
+{
+       struct fs_prio *iter_prio;
+
+       fs_for_each_prio(iter_prio, ns) {
+               if (iter_prio->prio == prio)
+                       return iter_prio;
+       }
+
+       return NULL;
+}
+
+static unsigned int find_next_free_level(struct fs_prio *prio)
+{
+       if (!list_empty(&prio->node.children)) {
+               struct mlx5_flow_table *ft;
+
+               ft = list_last_entry(&prio->node.children,
+                                    struct mlx5_flow_table,
+                                    node.list);
+               return ft->level + 1;
+       }
+       return prio->start_level;
+}
+
+static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
+{
+       unsigned int i;
+
+       for (i = 0; i < size; i++, mask++, val1++, val2++)
+               if ((*((u8 *)val1) & (*(u8 *)mask)) !=
+                   ((*(u8 *)val2) & (*(u8 *)mask)))
+                       return false;
+
+       return true;
+}
+
+static bool compare_match_value(struct mlx5_flow_group_mask *mask,
+                               void *fte_param1, void *fte_param2)
+{
+       if (mask->match_criteria_enable &
+           1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
+               void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param1, outer_headers);
+               void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param2, outer_headers);
+               void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+                                             mask->match_criteria, outer_headers);
+
+               if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+                                  MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+                       return false;
+       }
+
+       if (mask->match_criteria_enable &
+           1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
+               void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param1, misc_parameters);
+               void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param2, misc_parameters);
+               void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+                                         mask->match_criteria, misc_parameters);
+
+               if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+                                  MLX5_ST_SZ_BYTES(fte_match_set_misc)))
+                       return false;
+       }
+
+       if (mask->match_criteria_enable &
+           1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
+               void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param1, inner_headers);
+               void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+                                               fte_param2, inner_headers);
+               void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+                                         mask->match_criteria, inner_headers);
+
+               if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+                                  MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+                       return false;
+       }
+       return true;
+}
+
+static bool compare_match_criteria(u8 match_criteria_enable1,
+                                  u8 match_criteria_enable2,
+                                  void *mask1, void *mask2)
+{
+       return match_criteria_enable1 == match_criteria_enable2 &&
+               !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
+}
+
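+/* Each node caches the fs_node of its root namespace; translate it back to
+ * the enclosing mlx5_flow_root_namespace.
+ */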
+static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+{
+       struct fs_node *root;
+       struct mlx5_flow_namespace *ns;
+
+       root = node->root;
+
+       if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
+               pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
+               return NULL;
+       }
+
+       ns = container_of(root, struct mlx5_flow_namespace, node);
+       return container_of(ns, struct mlx5_flow_root_namespace, ns);
+}
+
+static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
+{
+       struct mlx5_flow_root_namespace *root = find_root(node);
+
+       if (root)
+               return root->dev;
+       return NULL;
+}
+
+static void del_flow_table(struct fs_node *node)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_core_dev *dev;
+       struct fs_prio *prio;
+       int err;
+
+       fs_get_obj(ft, node);
+       dev = get_dev(&ft->node);
+
+       err = mlx5_cmd_destroy_flow_table(dev, ft);
+       if (err)
+               pr_warn("flow steering can't destroy ft\n");
+       fs_get_obj(prio, ft->node.parent);
+       prio->num_ft--;
+}
+
+static void del_rule(struct fs_node *node)
+{
+       struct mlx5_flow_rule *rule;
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg;
+       struct fs_fte *fte;
+       u32     *match_value;
+       struct mlx5_core_dev *dev = get_dev(node);
+       int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
+       int err;
+
+       match_value = mlx5_vzalloc(match_len);
+       if (!match_value) {
+               pr_warn("failed to allocate inbox\n");
+               return;
+       }
+
+       fs_get_obj(rule, node);
+       fs_get_obj(fte, rule->node.parent);
+       fs_get_obj(fg, fte->node.parent);
+       memcpy(match_value, fte->val, sizeof(fte->val));
+       fs_get_obj(ft, fg->node.parent);
+       list_del(&rule->node.list);
+       fte->dests_size--;
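+       /* If destinations remain, update the FTE in hardware rather than
+        * deleting it; the FTE itself is torn down later by del_fte().
+        */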
+       if (fte->dests_size) {
+               err = mlx5_cmd_update_fte(dev, ft,
+                                         fg->id, fte);
+               if (err)
+                       pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
+                               __func__, fg->id, fte->index);
+       }
+       kvfree(match_value);
+}
+
+static void del_fte(struct fs_node *node)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg;
+       struct mlx5_core_dev *dev;
+       struct fs_fte *fte;
+       int err;
+
+       fs_get_obj(fte, node);
+       fs_get_obj(fg, fte->node.parent);
+       fs_get_obj(ft, fg->node.parent);
+
+       dev = get_dev(&ft->node);
+       err = mlx5_cmd_delete_fte(dev, ft,
+                                 fte->index);
+       if (err)
+               pr_warn("flow steering can't delete fte in index %d of flow group id %d\n",
+                       fte->index, fg->id);
+
+       fte->status = 0;
+       fg->num_ftes--;
+}
+
+static void del_flow_group(struct fs_node *node)
+{
+       struct mlx5_flow_group *fg;
+       struct mlx5_flow_table *ft;
+       struct mlx5_core_dev *dev;
+
+       fs_get_obj(fg, node);
+       fs_get_obj(ft, fg->node.parent);
+       dev = get_dev(&ft->node);
+
+       if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+               pr_warn("flow steering can't destroy fg %d of ft %d\n",
+                       fg->id, ft->id);
+}
+
+static struct fs_fte *alloc_fte(u8 action,
+                               u32 flow_tag,
+                               u32 *match_value,
+                               unsigned int index)
+{
+       struct fs_fte *fte;
+
+       fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+       if (!fte)
+               return ERR_PTR(-ENOMEM);
+
+       memcpy(fte->val, match_value, sizeof(fte->val));
+       fte->node.type = FS_TYPE_FLOW_ENTRY;
+       fte->flow_tag = flow_tag;
+       fte->index = index;
+       fte->action = action;
+
+       return fte;
+}
+
+static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+{
+       struct mlx5_flow_group *fg;
+       void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+                                           create_fg_in, match_criteria);
+       u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+                                           create_fg_in,
+                                           match_criteria_enable);
+       fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+       if (!fg)
+               return ERR_PTR(-ENOMEM);
+
+       fg->mask.match_criteria_enable = match_criteria_enable;
+       memcpy(&fg->mask.match_criteria, match_criteria,
+              sizeof(fg->mask.match_criteria));
+       fg->node.type = FS_TYPE_FLOW_GROUP;
+       fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
+                                  start_flow_index);
+       fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
+                               end_flow_index) - fg->start_index + 1;
+       return fg;
+}
+
+static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+                                               enum fs_flow_table_type table_type)
+{
+       struct mlx5_flow_table *ft;
+
+       ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+       if (!ft)
+               return NULL;
+
+       ft->level = level;
+       ft->node.type = FS_TYPE_FLOW_TABLE;
+       ft->type = table_type;
+       ft->max_fte = max_fte;
+
+       return ft;
+}
+
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+                                              int prio,
+                                              int max_fte)
+{
+       struct mlx5_flow_table *ft;
+       int err;
+       int log_table_sz;
+       struct mlx5_flow_root_namespace *root =
+               find_root(&ns->node);
+       struct fs_prio *fs_prio = NULL;
+
+       if (!root) {
+               pr_err("mlx5: flow steering failed to find root of namespace\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       fs_prio = find_prio(ns, prio);
+       if (!fs_prio)
+               return ERR_PTR(-EINVAL);
+
+       lock_ref_node(&fs_prio->node);
+       if (fs_prio->num_ft == fs_prio->max_ft) {
+               err = -ENOSPC;
+               goto unlock_prio;
+       }
+
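+       /* Place the new table at the next unused level inside this priority */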
+       ft = alloc_flow_table(find_next_free_level(fs_prio),
+                             roundup_pow_of_two(max_fte),
+                             root->table_type);
+       if (!ft) {
+               err = -ENOMEM;
+               goto unlock_prio;
+       }
+
+       tree_init_node(&ft->node, 1, del_flow_table);
+       log_table_sz = ilog2(ft->max_fte);
+       err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+                                        log_table_sz, &ft->id);
+       if (err)
+               goto free_ft;
+
+       tree_add_node(&ft->node, &fs_prio->node);
+       list_add_tail(&ft->node.list, &fs_prio->node.children);
+       fs_prio->num_ft++;
+       unlock_ref_node(&fs_prio->node);
+
+       return ft;
+
+free_ft:
+       kfree(ft);
+unlock_prio:
+       unlock_ref_node(&fs_prio->node);
+       return ERR_PTR(err);
+}
+
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+                                              u32 *fg_in)
+{
+       struct mlx5_flow_group *fg;
+       struct mlx5_core_dev *dev = get_dev(&ft->node);
+       int err;
+
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+       fg = alloc_flow_group(fg_in);
+       if (IS_ERR(fg))
+               return fg;
+
+       lock_ref_node(&ft->node);
+       err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+       if (err) {
+               kfree(fg);
+               unlock_ref_node(&ft->node);
+               return ERR_PTR(err);
+       }
+       /* Add node to tree */
+       tree_init_node(&fg->node, 1, del_flow_group);
+       tree_add_node(&fg->node, &ft->node);
+       /* Add node to group list */
+       list_add(&fg->node.list, ft->node.children.prev);
+       unlock_ref_node(&ft->node);
+
+       return fg;
+}
+
+static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_rule *rule;
+
+       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+       if (!rule)
+               return NULL;
+
+       rule->node.type = FS_TYPE_FLOW_DEST;
+       memcpy(&rule->dest_attr, dest, sizeof(*dest));
+
+       return rule;
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
+                                          struct mlx5_flow_group *fg,
+                                          struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_rule *rule;
+       int err;
+
+       rule = alloc_rule(dest);
+       if (!rule)
+               return ERR_PTR(-ENOMEM);
+
+       fs_get_obj(ft, fg->node.parent);
+       /* Add the new destination at the tail of the fte's dests list */
+       tree_init_node(&rule->node, 1, del_rule);
+       list_add_tail(&rule->node.list, &fte->node.children);
+       fte->dests_size++;
+       if (fte->dests_size == 1)
+               err = mlx5_cmd_create_fte(get_dev(&ft->node),
+                                         ft, fg->id, fte);
+       else
+               err = mlx5_cmd_update_fte(get_dev(&ft->node),
+                                         ft, fg->id, fte);
+       if (err)
+               goto free_rule;
+
+       fte->status |= FS_FTE_STATUS_EXISTING;
+
+       return rule;
+
+free_rule:
+       list_del(&rule->node.list);
+       kfree(rule);
+       fte->dests_size--;
+       return ERR_PTR(err);
+}
+
+/* Assumed fg is locked */
+static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
+                                      struct list_head **prev)
+{
+       struct fs_fte *fte;
+       unsigned int start = fg->start_index;
+
+       if (prev)
+               *prev = &fg->node.children;
+
+       /* assumed list is sorted by index */
+       fs_for_each_fte(fte, fg) {
+               if (fte->index != start)
+                       return start;
+               start++;
+               if (prev)
+                       *prev = &fte->node.list;
+       }
+
+       return start;
+}
+
+/* prev is output, prev->next = new_fte */
+static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
+                                u32 *match_value,
+                                u8 action,
+                                u32 flow_tag,
+                                struct list_head **prev)
+{
+       int index;
+
+       index = get_free_fte_index(fg, prev);
+
+       return alloc_fte(action, flow_tag, match_value, index);
+}
+
+/* Assuming parent fg(flow table) is locked */
+static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
+                                         u32 *match_value,
+                                         u8 action,
+                                         u32 flow_tag,
+                                         struct mlx5_flow_destination *dest)
+{
+       struct fs_fte *fte;
+       struct mlx5_flow_rule *rule;
+       struct mlx5_flow_table *ft;
+       struct list_head *prev;
+
+       lock_ref_node(&fg->node);
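+       /* Look for an existing FTE with the same match value, action and
+        * flow tag; if found, the new rule is just another destination.
+        */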
+       fs_for_each_fte(fte, fg) {
+               nested_lock_ref_node(&fte->node);
+               if (compare_match_value(&fg->mask, match_value, &fte->val) &&
+                   action == fte->action && flow_tag == fte->flow_tag) {
+                       rule = add_rule_fte(fte, fg, dest);
+                       unlock_ref_node(&fte->node);
+                       if (IS_ERR(rule))
+                               goto unlock_fg;
+                       else
+                               goto add_rule;
+               }
+               unlock_ref_node(&fte->node);
+       }
+       fs_get_obj(ft, fg->node.parent);
+       if (fg->num_ftes >= fg->max_ftes) {
+               rule = ERR_PTR(-ENOSPC);
+               goto unlock_fg;
+       }
+
+       fte = create_fte(fg, match_value, action, flow_tag, &prev);
+       if (IS_ERR(fte)) {
+               rule = ERR_CAST(fte);
+               goto unlock_fg;
+       }
+       tree_init_node(&fte->node, 0, del_fte);
+       rule = add_rule_fte(fte, fg, dest);
+       if (IS_ERR(rule)) {
+               kfree(fte);
+               goto unlock_fg;
+       }
+
+       fg->num_ftes++;
+
+       tree_add_node(&fte->node, &fg->node);
+       list_add(&fte->node.list, prev);
+add_rule:
+       tree_add_node(&rule->node, &fte->node);
+unlock_fg:
+       unlock_ref_node(&fg->node);
+       return rule;
+}
+
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+                  u8 match_criteria_enable,
+                  u32 *match_criteria,
+                  u32 *match_value,
+                  u32 action,
+                  u32 flow_tag,
+                  struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_group *g;
+       struct mlx5_flow_rule *rule = ERR_PTR(-EINVAL);
+
+       tree_get_node(&ft->node);
+       lock_ref_node(&ft->node);
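+       /* Search for a flow group whose mask matches this rule's criteria */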
+       fs_for_each_fg(g, ft)
+               if (compare_match_criteria(g->mask.match_criteria_enable,
+                                          match_criteria_enable,
+                                          g->mask.match_criteria,
+                                          match_criteria)) {
+                       unlock_ref_node(&ft->node);
+                       rule = add_rule_fg(g, match_value,
+                                          action, flow_tag, dest);
+                       goto put;
+               }
+       unlock_ref_node(&ft->node);
+put:
+       tree_put_node(&ft->node);
+       return rule;
+}
+
+void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
+{
+       tree_remove_node(&rule->node);
+}
+
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+{
+       if (tree_remove_node(&ft->node))
+               mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
+                              ft->id);
+
+       return 0;
+}
+
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+{
+       if (tree_remove_node(&fg->node))
+               mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
+                              fg->id);
+}
+
+struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+                                                   enum mlx5_flow_namespace_type type)
+{
+       struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+       int prio;
+       struct fs_prio *fs_prio;
+       struct mlx5_flow_namespace *ns;
+
+       if (!root_ns)
+               return NULL;
+
+       switch (type) {
+       case MLX5_FLOW_NAMESPACE_KERNEL:
+               prio = 0;
+               break;
+       case MLX5_FLOW_NAMESPACE_FDB:
+               if (dev->priv.fdb_root_ns)
+                       return &dev->priv.fdb_root_ns->ns;
+               else
+                       return NULL;
+       default:
+               return NULL;
+       }
+
+       fs_prio = find_prio(&root_ns->ns, prio);
+       if (!fs_prio)
+               return NULL;
+
+       ns = list_first_entry(&fs_prio->node.children,
+                             typeof(*ns),
+                             node.list);
+
+       return ns;
+}
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+                                     unsigned prio, int max_ft,
+                                     int start_level)
+{
+       struct fs_prio *fs_prio;
+
+       fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
+       if (!fs_prio)
+               return ERR_PTR(-ENOMEM);
+
+       fs_prio->node.type = FS_TYPE_PRIO;
+       tree_init_node(&fs_prio->node, 1, NULL);
+       tree_add_node(&fs_prio->node, &ns->node);
+       fs_prio->max_ft = max_ft;
+       fs_prio->prio = prio;
+       fs_prio->start_level = start_level;
+       list_add_tail(&fs_prio->node.list, &ns->node.children);
+
+       return fs_prio;
+}
+
+static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
+                                                    *ns)
+{
+       ns->node.type = FS_TYPE_NAMESPACE;
+
+       return ns;
+}
+
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+{
+       struct mlx5_flow_namespace      *ns;
+
+       ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+       if (!ns)
+               return ERR_PTR(-ENOMEM);
+
+       fs_init_namespace(ns);
+       tree_init_node(&ns->node, 1, NULL);
+       tree_add_node(&ns->node, &prio->node);
+       list_add_tail(&ns->node.list, &prio->node.children);
+
+       return ns;
+}
+
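+/* Walk the static init_tree_node description and instantiate the matching
+ * priorities and namespaces under fs_parent_node.
+ */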
+static int init_root_tree_recursive(int max_ft_level, struct init_tree_node *init_node,
+                                   struct fs_node *fs_parent_node,
+                                   struct init_tree_node *init_parent_node,
+                                   int index)
+{
+       struct mlx5_flow_namespace *fs_ns;
+       struct fs_prio *fs_prio;
+       struct fs_node *base;
+       int i;
+       int err;
+
+       if (init_node->type == FS_TYPE_PRIO) {
+               if (init_node->min_ft_level > max_ft_level)
+                       return -ENOTSUPP;
+
+               fs_get_obj(fs_ns, fs_parent_node);
+               fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft,
+                                        init_node->start_level);
+               if (IS_ERR(fs_prio))
+                       return PTR_ERR(fs_prio);
+               base = &fs_prio->node;
+       } else if (init_node->type == FS_TYPE_NAMESPACE) {
+               fs_get_obj(fs_prio, fs_parent_node);
+               fs_ns = fs_create_namespace(fs_prio);
+               if (IS_ERR(fs_ns))
+                       return PTR_ERR(fs_ns);
+               base = &fs_ns->node;
+       } else {
+               return -EINVAL;
+       }
+       for (i = 0; i < init_node->ar_size; i++) {
+               err = init_root_tree_recursive(max_ft_level,
+                                              &init_node->children[i], base,
+                                              init_node, i);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int init_root_tree(int max_ft_level, struct init_tree_node *init_node,
+                         struct fs_node *fs_parent_node)
+{
+       int i;
+       struct mlx5_flow_namespace *fs_ns;
+       int err;
+
+       fs_get_obj(fs_ns, fs_parent_node);
+       for (i = 0; i < init_node->ar_size; i++) {
+               err = init_root_tree_recursive(max_ft_level,
+                                              &init_node->children[i],
+                                              &fs_ns->node,
+                                              init_node, i);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
+                                                      enum fs_flow_table_type
+                                                      table_type)
+{
+       struct mlx5_flow_root_namespace *root_ns;
+       struct mlx5_flow_namespace *ns;
+
+       /* Create the root namespace */
+       root_ns = mlx5_vzalloc(sizeof(*root_ns));
+       if (!root_ns)
+               return NULL;
+
+       root_ns->dev = dev;
+       root_ns->table_type = table_type;
+
+       ns = &root_ns->ns;
+       fs_init_namespace(ns);
+       tree_init_node(&ns->node, 1, NULL);
+       tree_add_node(&ns->node, NULL);
+
+       return root_ns;
+}
+
+static int init_root_ns(struct mlx5_core_dev *dev)
+{
+       int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
+                                             flow_table_properties_nic_receive.
+                                             max_ft_level);
+
+       dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
+       if (IS_ERR_OR_NULL(dev->priv.root_ns))
+               goto cleanup;
+
+       if (init_root_tree(max_ft_level, &root_fs, &dev->priv.root_ns->ns.node))
+               goto cleanup;
+
+       return 0;
+
+cleanup:
+       mlx5_cleanup_fs(dev);
+       return -ENOMEM;
+}
+
+static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
+                                       struct mlx5_flow_root_namespace *root_ns)
+{
+       struct fs_node *prio;
+
+       if (!root_ns)
+               return;
+
+       if (!list_empty(&root_ns->ns.node.children)) {
+               prio = list_first_entry(&root_ns->ns.node.children,
+                                       struct fs_node,
+                                list);
+               if (tree_remove_node(prio))
+                       mlx5_core_warn(dev,
+                                      "Flow steering priority wasn't destroyed, refcount > 1\n");
+       }
+       if (tree_remove_node(&root_ns->ns.node))
+               mlx5_core_warn(dev,
+                              "Flow steering namespace wasn't destroyed, refcount > 1\n");
+}
+
+static void cleanup_root_ns(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+       struct fs_prio *iter_prio;
+
+       if (!MLX5_CAP_GEN(dev, nic_flow_table))
+               return;
+
+       if (!root_ns)
+               return;
+
+       /* stage 1: remove priorities that live under the sub-namespaces */
+       fs_for_each_prio(iter_prio, &root_ns->ns) {
+               struct fs_node *node;
+               struct mlx5_flow_namespace *iter_ns;
+
+               fs_for_each_ns_or_ft(node, iter_prio) {
+                       if (node->type == FS_TYPE_FLOW_TABLE)
+                               continue;
+                       fs_get_obj(iter_ns, node);
+                       while (!list_empty(&iter_ns->node.children)) {
+                               struct fs_prio *obj_iter_prio2;
+                               struct fs_node *iter_prio2 =
+                                       list_first_entry(&iter_ns->node.children,
+                                                        struct fs_node,
+                                                        list);
+
+                               fs_get_obj(obj_iter_prio2, iter_prio2);
+                               if (tree_remove_node(iter_prio2)) {
+                                       mlx5_core_warn(dev,
+                                                      "Priority %d wasn't destroyed, refcount > 1\n",
+                                                      obj_iter_prio2->prio);
+                                       return;
+                               }
+                       }
+               }
+       }
+
+       /* stage 2: remove the sub-namespaces under the root's priorities */
+       fs_for_each_prio(iter_prio, &root_ns->ns) {
+               while (!list_empty(&iter_prio->node.children)) {
+                       struct fs_node *iter_ns =
+                               list_first_entry(&iter_prio->node.children,
+                                                struct fs_node,
+                                                list);
+                       if (tree_remove_node(iter_ns)) {
+                               mlx5_core_warn(dev,
+                                              "Namespace wasn't destroyed, refcount > 1\n");
+                               return;
+                       }
+               }
+       }
+
+       /* stage 3: remove the root namespace's own priorities */
+       while (!list_empty(&root_ns->ns.node.children)) {
+               struct fs_prio *obj_prio_node;
+               struct fs_node *prio_node =
+                       list_first_entry(&root_ns->ns.node.children,
+                                        struct fs_node,
+                                        list);
+
+               fs_get_obj(obj_prio_node, prio_node);
+               if (tree_remove_node(prio_node)) {
+                       mlx5_core_warn(dev,
+                                      "Priority %d wasn't destroyed, refcount > 1\n",
+                                      obj_prio_node->prio);
+                       return;
+               }
+       }
+
+       if (tree_remove_node(&root_ns->ns.node)) {
+               mlx5_core_warn(dev,
+                              "root namespace wasn't destroyed, refcount > 1\n");
+               return;
+       }
+
+       dev->priv.root_ns = NULL;
+}
+
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+{
+       cleanup_root_ns(dev);
+       cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+}
+
+static int init_fdb_root_ns(struct mlx5_core_dev *dev)
+{
+       struct fs_prio *prio;
+
+       dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
+       if (!dev->priv.fdb_root_ns)
+               return -ENOMEM;
+
+       /* Create single prio */
+       prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1, 0);
+       if (IS_ERR(prio)) {
+               cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+               return PTR_ERR(prio);
+       }
+
+       return 0;
+}
+
+int mlx5_init_fs(struct mlx5_core_dev *dev)
+{
+       int err = 0;
+
+       if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+               err = init_root_ns(dev);
+               if (err)
+                       return err;
+       }
+       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+               err = init_fdb_root_ns(dev);
+               if (err)
+                       cleanup_root_ns(dev);
+       }
+
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
new file mode 100644 (file)
index 0000000..4ebb97f
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CORE_
+#define _MLX5_FS_CORE_
+
+#include <linux/mlx5/fs.h>
+
+enum fs_node_type {
+       FS_TYPE_NAMESPACE,
+       FS_TYPE_PRIO,
+       FS_TYPE_FLOW_TABLE,
+       FS_TYPE_FLOW_GROUP,
+       FS_TYPE_FLOW_ENTRY,
+       FS_TYPE_FLOW_DEST
+};
+
+enum fs_flow_table_type {
+       FS_FT_NIC_RX     = 0x0,
+       FS_FT_FDB        = 0x4,
+};
+
+enum fs_fte_status {
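+       /* set once the FTE has been created in the device's flow table */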
+       FS_FTE_STATUS_EXISTING = 1UL << 0,
+};
+
+struct fs_node {
+       struct list_head        list;
+       struct list_head        children;
+       enum fs_node_type       type;
+       struct fs_node          *parent;
+       struct fs_node          *root;
+       /* lock the node for writing and traversing */
+       struct mutex            lock;
+       atomic_t                refcount;
+       void                    (*remove_func)(struct fs_node *);
+};
+
+struct mlx5_flow_rule {
+       struct fs_node                          node;
+       struct mlx5_flow_destination            dest_attr;
+};
+
+/* Type of children is mlx5_flow_group */
+struct mlx5_flow_table {
+       struct fs_node                  node;
+       u32                             id;
+       unsigned int                    max_fte;
+       unsigned int                    level;
+       enum fs_flow_table_type         type;
+};
+
+/* Type of children is mlx5_flow_rule */
+struct fs_fte {
+       struct fs_node                  node;
+       u32                             val[MLX5_ST_SZ_DW(fte_match_param)];
+       u32                             dests_size;
+       u32                             flow_tag;
+       u32                             index;
+       u32                             action;
+       enum fs_fte_status              status;
+};
+
+/* Type of children is mlx5_flow_table/namespace */
+struct fs_prio {
+       struct fs_node                  node;
+       unsigned int                    max_ft;
+       unsigned int                    start_level;
+       unsigned int                    prio;
+       unsigned int                    num_ft;
+};
+
+/* Type of children is fs_prio */
+struct mlx5_flow_namespace {
+       /* parent == NULL => root ns */
+       struct  fs_node                 node;
+};
+
+struct mlx5_flow_group_mask {
+       u8      match_criteria_enable;
+       u32     match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+/* Type of children is fs_fte */
+struct mlx5_flow_group {
+       struct fs_node                  node;
+       struct mlx5_flow_group_mask     mask;
+       u32                             start_index;
+       u32                             max_ftes;
+       u32                             num_ftes;
+       u32                             id;
+};
+
+struct mlx5_flow_root_namespace {
+       struct mlx5_flow_namespace      ns;
+       enum   fs_flow_table_type       table_type;
+       struct mlx5_core_dev            *dev;
+};
+
+int mlx5_init_fs(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+
+#define fs_get_obj(v, _node)  { v = container_of((_node), typeof(*v), node); }
+
+#define fs_list_for_each_entry(pos, root)              \
+       list_for_each_entry(pos, root, node.list)
+
+#define fs_for_each_ns_or_ft_reverse(pos, prio)                                \
+       list_for_each_entry_reverse(pos, &(prio)->node.children, list)
+
+#define fs_for_each_ns_or_ft(pos, prio)                                        \
+       list_for_each_entry(pos, (&(prio)->node.children), list)
+
+#define fs_for_each_prio(pos, ns)                      \
+       fs_list_for_each_entry(pos, &(ns)->node.children)
+
+#define fs_for_each_fg(pos, ft)                        \
+       fs_list_for_each_entry(pos, &(ft)->node.children)
+
+#define fs_for_each_fte(pos, fg)                       \
+       fs_list_for_each_entry(pos, &(fg)->node.children)
+
+#define fs_for_each_dst(pos, fte)                      \
+       fs_list_for_each_entry(pos, &(fte)->node.children)
+
+#endif
index 1c9f9a54a87339c89779973bb128a7ecfb9bf632..aa1ab47023852dcf0b163e43e57629121ab71a3a 100644 (file)
@@ -173,7 +173,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, vport_group_manager)) {
+       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
                                         HCA_CAP_OPMOD_GET_CUR);
                if (err)
index c6de3240f76f214d8f57f392fa654bff7d084740..789882b7b711e31eaccbd7c04903c72b2c8fa751 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/delay.h>
 #include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
+#include "fs_core.h"
 #ifdef CONFIG_MLX5_CORE_EN
 #include "eswitch.h"
 #endif
@@ -1055,6 +1056,11 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_init_srq_table(dev);
        mlx5_init_mr_table(dev);
 
+       err = mlx5_init_fs(dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to init flow steering\n");
+               goto err_fs;
+       }
 #ifdef CONFIG_MLX5_CORE_EN
        err = mlx5_eswitch_init(dev);
        if (err) {
@@ -1093,6 +1099,8 @@ err_sriov:
        mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 err_reg_dev:
+       mlx5_cleanup_fs(dev);
+err_fs:
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
@@ -1165,6 +1173,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_eswitch_cleanup(dev->priv.eswitch);
 #endif
 
+       mlx5_cleanup_fs(dev);
        mlx5_cleanup_mr_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
index bee7da822dfe208eb9a9dd8095d76bf48de6962f..ea6a137fd76c8710a663362324818e5e6e69fe4c 100644 (file)
@@ -65,6 +65,9 @@ do {                                                                  \
                (__dev)->priv.name, __func__, __LINE__, current->pid,   \
                ##__VA_ARGS__)
 
+#define mlx5_core_info(__dev, format, ...)                             \
+       dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+
 enum {
        MLX5_CMD_DATA, /* print command payload only */
        MLX5_CMD_TIME, /* print command execution time */
index 4dad146b41ae5db1f931201fc48aca944f60da67..b86db967eab9e74470a19c76ede58cbcf45e2e88 100644 (file)
@@ -169,7 +169,7 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
                dev_err(mlxsw_hwmon->bus_info->dev, "Failed to write PWM\n");
                return err;
        }
-       return err ? err : len;
+       return len;
 }
 
 enum mlxsw_hwmon_attr_type {
@@ -242,7 +242,7 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
        }
        sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
        for (i = 0; i < sensor_count; i++) {
-               mlxsw_reg_mtmp_pack(mtmp_pl, 0, true, true);
+               mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true);
                err = mlxsw_reg_write(mlxsw_hwmon->core,
                                      MLXSW_REG(mtmp), mtmp_pl);
                if (err) {
index 4e4e4dcf054ff2f5987122d29ca5740e5b10010a..af631df4603a5f7f3199a556080fb1b2b554e794 100644 (file)
@@ -2684,7 +2684,7 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
                *p_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
        }
        if (p_max_temp) {
-               temp = mlxsw_reg_mtmp_temperature_get(payload);
+               temp = mlxsw_reg_mtmp_max_temperature_get(payload);
                *p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
        }
        if (sensor_name)
index 3ec07b9a458df1e1ca4132a626762d0a0bd626b8..322ed544348f09a75073400ec5eefbc5f6b4270f 100644 (file)
@@ -2091,7 +2091,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
        err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
        if (err)
                return err;
-       mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+       err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
        if (err)
                return err;
 
index 1cf12264861cf62c17f133e6595860e64a97193c..120cc2565d16c44b39245b8ea62680890dce824b 100644 (file)
@@ -410,9 +410,11 @@ static int ravb_dmac_init(struct net_device *ndev)
        /* Timestamp enable */
        ravb_write(ndev, TCCR_TFEN, TCCR);
 
-       /* Interrupt enable: */
+       /* Interrupt init: */
        /* Frame receive */
        ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
+       /* Disable FIFO full warning */
+       ravb_write(ndev, 0, RIC1);
        /* Receive FIFO full error, descriptor empty */
        ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
        /* Frame transmitted, timestamp FIFO updated */
@@ -1478,7 +1480,6 @@ static int ravb_close(struct net_device *ndev)
 
        /* Disable interrupts by clearing the interrupt masks. */
        ravb_write(ndev, 0, RIC0);
-       ravb_write(ndev, 0, RIC1);
        ravb_write(ndev, 0, RIC2);
        ravb_write(ndev, 0, TIC);
 
index bb1bb72121c0474b8c1898540a28d21264479d6a..17a26a429b71f2f4dfbc9495b8cb0b4f5eb2ad3f 100644 (file)
@@ -113,7 +113,7 @@ struct netcp_intf {
 #define        NETCP_PSDATA_LEN                KNAV_DMA_NUM_PS_WORDS
 struct netcp_packet {
        struct sk_buff          *skb;
-       u32                     *epib;
+       __le32                  *epib;
        u32                     *psdata;
        unsigned int            psdata_len;
        struct netcp_intf       *netcp;
index e5e20e734f21a9ac2778e3c658dc0f3848c8de38..92d08eb262c2876a39231fb68f34c8b25116478e 100644 (file)
@@ -109,69 +109,80 @@ module_param(netcp_debug_level, int, 0);
 MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
 
 /* Helper functions - Get/Set */
-static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
+static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
                         struct knav_dma_desc *desc)
 {
-       *buff_len = desc->buff_len;
-       *buff = desc->buff;
-       *ndesc = desc->next_desc;
+       *buff_len = le32_to_cpu(desc->buff_len);
+       *buff = le32_to_cpu(desc->buff);
+       *ndesc = le32_to_cpu(desc->next_desc);
 }
 
-static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
+static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
 {
-       *pad0 = desc->pad[0];
-       *pad1 = desc->pad[1];
+       *pad0 = le32_to_cpu(desc->pad[0]);
+       *pad1 = le32_to_cpu(desc->pad[1]);
+       *pad2 = le32_to_cpu(desc->pad[2]);
 }
 
-static void get_org_pkt_info(u32 *buff, u32 *buff_len,
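+/* Reassemble the 64-bit CPU pointer stored across pad[0] (low 32 bits) and
+ * pad[1] (high 32 bits) of the descriptor.
+ */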
+static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
+{
+       u64 pad64;
+
+       pad64 = le32_to_cpu(desc->pad[0]) +
+               ((u64)le32_to_cpu(desc->pad[1]) << 32);
+       *padptr = (void *)(uintptr_t)pad64;
+}
+
+static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
                             struct knav_dma_desc *desc)
 {
-       *buff = desc->orig_buff;
-       *buff_len = desc->orig_len;
+       *buff = le32_to_cpu(desc->orig_buff);
+       *buff_len = le32_to_cpu(desc->orig_len);
 }
 
-static void get_words(u32 *words, int num_words, u32 *desc)
+static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
 {
        int i;
 
        for (i = 0; i < num_words; i++)
-               words[i] = desc[i];
+               words[i] = le32_to_cpu(desc[i]);
 }
 
-static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
+static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
                         struct knav_dma_desc *desc)
 {
-       desc->buff_len = buff_len;
-       desc->buff = buff;
-       desc->next_desc = ndesc;
+       desc->buff_len = cpu_to_le32(buff_len);
+       desc->buff = cpu_to_le32(buff);
+       desc->next_desc = cpu_to_le32(ndesc);
 }
 
 static void set_desc_info(u32 desc_info, u32 pkt_info,
                          struct knav_dma_desc *desc)
 {
-       desc->desc_info = desc_info;
-       desc->packet_info = pkt_info;
+       desc->desc_info = cpu_to_le32(desc_info);
+       desc->packet_info = cpu_to_le32(pkt_info);
 }
 
-static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
+static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
 {
-       desc->pad[0] = pad0;
-       desc->pad[1] = pad1;
+       desc->pad[0] = cpu_to_le32(pad0);
+       desc->pad[1] = cpu_to_le32(pad1);
+       desc->pad[2] = cpu_to_le32(pad2);
 }
 
-static void set_org_pkt_info(u32 buff, u32 buff_len,
+static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
                             struct knav_dma_desc *desc)
 {
-       desc->orig_buff = buff;
-       desc->orig_len = buff_len;
+       desc->orig_buff = cpu_to_le32(buff);
+       desc->orig_len = cpu_to_le32(buff_len);
 }
 
-static void set_words(u32 *words, int num_words, u32 *desc)
+static void set_words(u32 *words, int num_words, __le32 *desc)
 {
        int i;
 
        for (i = 0; i < num_words; i++)
-               desc[i] = words[i];
+               desc[i] = cpu_to_le32(words[i]);
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
@@ -570,7 +581,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
        dma_addr_t dma_desc, dma_buf;
        unsigned int buf_len, dma_sz = sizeof(*ndesc);
        void *buf_ptr;
-       u32 tmp;
+       u32 pad[2];
 
        get_words(&dma_desc, 1, &desc->next_desc);
 
@@ -580,14 +591,15 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
                        dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
                        break;
                }
-               get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
-               get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
+               get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
+               get_pad_ptr(&buf_ptr, ndesc);
                dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(buf_ptr);
                knav_pool_desc_put(netcp->rx_pool, desc);
        }
 
-       get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
+       get_pad_info(&pad[0], &pad[1], &buf_len, desc);
+       buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+
        if (buf_ptr)
                netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
        knav_pool_desc_put(netcp->rx_pool, desc);
@@ -626,7 +638,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
        struct netcp_packet p_info;
        struct sk_buff *skb;
        void *org_buf_ptr;
-       u32 tmp;
+       u32 pad[2];
 
        dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
        if (!dma_desc)
@@ -639,7 +650,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
        }
 
        get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
-       get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);
+       get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
+       org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
 
        if (unlikely(!org_buf_ptr)) {
                dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -664,6 +675,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
        /* Fill in the page fragment list */
        while (dma_desc) {
                struct page *page;
+               void *ptr;
 
                ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
                if (unlikely(!ndesc)) {
@@ -672,14 +684,15 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                }
 
                get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
-               get_pad_info((u32 *)&page, &tmp, ndesc);
+               get_pad_ptr(&ptr, ndesc);
+               page = ptr;
 
                if (likely(dma_buff && buf_len && page)) {
                        dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
                } else {
-                       dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
-                               (void *)dma_buff, buf_len, page);
+                       dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
+                               &dma_buff, buf_len, page);
                        goto free_desc;
                }
 
@@ -750,7 +763,6 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
        unsigned int buf_len, dma_sz;
        dma_addr_t dma;
        void *buf_ptr;
-       u32 tmp;
 
        /* Allocate descriptor */
        while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
@@ -761,7 +773,7 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
                }
 
                get_org_pkt_info(&dma, &buf_len, desc);
-               get_pad_info((u32 *)&buf_ptr, &tmp, desc);
+               get_pad_ptr(&buf_ptr, desc);
 
                if (unlikely(!dma)) {
                        dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -813,7 +825,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
        struct page *page;
        dma_addr_t dma;
        void *bufptr;
-       u32 pad[2];
+       u32 pad[3];
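+       /* pad[0]/pad[1] hold the low/high 32 bits of the buffer's virtual
+        * pointer (frag or page), pad[2] the primary buffer length.
+        */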
 
        /* Allocate descriptor */
        hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -830,7 +842,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 
                bufptr = netdev_alloc_frag(primary_buf_len);
-               pad[1] = primary_buf_len;
+               pad[2] = primary_buf_len;
 
                if (unlikely(!bufptr)) {
                        dev_warn_ratelimited(netcp->ndev_dev,
@@ -842,7 +854,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                if (unlikely(dma_mapping_error(netcp->dev, dma)))
                        goto fail;
 
-               pad[0] = (u32)bufptr;
+               pad[0] = lower_32_bits((uintptr_t)bufptr);
+               pad[1] = upper_32_bits((uintptr_t)bufptr);
 
        } else {
                /* Allocate a secondary receive queue entry */
@@ -853,8 +866,9 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
                }
                buf_len = PAGE_SIZE;
                dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
-               pad[0] = (u32)page;
-               pad[1] = 0;
+               pad[0] = lower_32_bits((uintptr_t)page);
+               pad[1] = upper_32_bits((uintptr_t)page);
+               pad[2] = 0;
        }
 
        desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -864,7 +878,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
        pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
                    KNAV_DMA_DESC_RETQ_SHIFT;
        set_org_pkt_info(dma, buf_len, hwdesc);
-       set_pad_info(pad[0], pad[1], hwdesc);
+       set_pad_info(pad[0], pad[1], pad[2], hwdesc);
        set_desc_info(desc_info, pkt_info, hwdesc);
 
        /* Push to FDQs */
@@ -935,8 +949,8 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
                        dma_unmap_single(netcp->dev, dma_buf, buf_len,
                                         DMA_TO_DEVICE);
                else
-                       dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
-                                (void *)dma_buf, buf_len);
+                       dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
+                                &dma_buf, buf_len);
 
                knav_pool_desc_put(netcp->tx_pool, ndesc);
                ndesc = NULL;
@@ -953,11 +967,11 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                                          unsigned int budget)
 {
        struct knav_dma_desc *desc;
+       void *ptr;
        struct sk_buff *skb;
        unsigned int dma_sz;
        dma_addr_t dma;
        int pkts = 0;
-       u32 tmp;
 
        while (budget--) {
                dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
@@ -970,7 +984,8 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                        continue;
                }
 
-               get_pad_info((u32 *)&skb, &tmp, desc);
+               get_pad_ptr(&ptr, desc);
+               skb = ptr;
                netcp_free_tx_desc_chain(netcp, desc, dma_sz);
                if (!skb) {
                        dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1059,6 +1074,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
                u32 page_offset = frag->page_offset;
                u32 buf_len = skb_frag_size(frag);
                dma_addr_t desc_dma;
+               u32 desc_dma_32;
                u32 pkt_info;
 
                dma_addr = dma_map_page(dev, page, page_offset, buf_len,
@@ -1075,13 +1091,13 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
                        goto free_descs;
                }
 
-               desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
-                                                     (void *)ndesc);
+               desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
                pkt_info =
                        (netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
                                KNAV_DMA_DESC_RETQ_SHIFT;
                set_pkt_info(dma_addr, buf_len, 0, ndesc);
-               set_words(&desc_dma, 1, &pdesc->next_desc);
+               desc_dma_32 = (u32)desc_dma;
+               set_words(&desc_dma_32, 1, &pdesc->next_desc);
                pkt_len += buf_len;
                if (pdesc != desc)
                        knav_pool_desc_map(netcp->tx_pool, pdesc,
@@ -1129,8 +1145,8 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
        p_info.ts_context = NULL;
        p_info.txtstamp_complete = NULL;
        p_info.epib = desc->epib;
-       p_info.psdata = desc->psdata;
-       memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));
+       p_info.psdata = (u32 __force *)desc->psdata;
+       memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));
 
        /* Find out where to inject the packet for transmission */
        list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
@@ -1154,11 +1170,12 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
 
        /* update descriptor */
        if (p_info.psdata_len) {
-               u32 *psdata = p_info.psdata;
+               /* set_words() converts psdata in place from CPU to LE order */
+               __le32 *psdata = (void __force *)p_info.psdata;
 
                memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
                        p_info.psdata_len);
-               set_words(psdata, p_info.psdata_len, psdata);
+               set_words(p_info.psdata, p_info.psdata_len, psdata);
                tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
                        KNAV_DMA_DESC_PSLEN_SHIFT;
        }
@@ -1173,11 +1190,14 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
        }
 
        set_words(&tmp, 1, &desc->packet_info);
-       set_words((u32 *)&skb, 1, &desc->pad[0]);
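+       /* Stash the skb pointer in pad[0]/pad[1] so that TX completion can
+        * recover it via get_pad_ptr().
+        */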
+       tmp = lower_32_bits((uintptr_t)skb);
+       set_words(&tmp, 1, &desc->pad[0]);
+       tmp = upper_32_bits((uintptr_t)skb);
+       set_words(&tmp, 1, &desc->pad[1]);
 
        if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
                tmp = tx_pipe->switch_to_port;
-               set_words((u32 *)&tmp, 1, &desc->tag_info);
+               set_words(&tmp, 1, &desc->tag_info);
        }
 
        /* submit packet descriptor */
index de5c30c9f059203040cc47e83cce76ae9e5bfde6..0750d7a938787bf44e101fbdbe4939ed96925400 100644 (file)
@@ -71,8 +71,14 @@ struct geneve_dev {
        __be16             dst_port;
        bool               collect_md;
        struct gro_cells   gro_cells;
+       u32                flags;
 };
 
+/* Geneve device flags */
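+/* UDP_CSUM enables checksums on IPv4 TX; the ZERO_CSUM6 flags allow zero
+ * UDP checksums on IPv6 TX and RX respectively.
+ */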
+#define GENEVE_F_UDP_CSUM              BIT(0)
+#define GENEVE_F_UDP_ZERO_CSUM6_TX     BIT(1)
+#define GENEVE_F_UDP_ZERO_CSUM6_RX     BIT(2)
+
 struct geneve_sock {
        bool                    collect_md;
        struct list_head        list;
@@ -81,6 +87,7 @@ struct geneve_sock {
        int                     refcnt;
        struct udp_offload      udp_offloads;
        struct hlist_head       vni_list[VNI_HASH_SIZE];
+       u32                     flags;
 };
 
 static inline __u32 geneve_net_vni_hash(u8 vni[3])
@@ -343,7 +350,7 @@ error:
 }
 
 static struct socket *geneve_create_sock(struct net *net, bool ipv6,
-                                        __be16 port)
+                                        __be16 port, u32 flags)
 {
        struct socket *sock;
        struct udp_port_cfg udp_conf;
@@ -354,6 +361,8 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6,
        if (ipv6) {
                udp_conf.family = AF_INET6;
                udp_conf.ipv6_v6only = 1;
+               udp_conf.use_udp6_rx_checksums =
+                   !(flags & GENEVE_F_UDP_ZERO_CSUM6_RX);
        } else {
                udp_conf.family = AF_INET;
                udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
@@ -480,7 +489,7 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
 
 /* Create new listen socket if needed */
 static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
-                                               bool ipv6)
+                                               bool ipv6, u32 flags)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_sock *gs;
@@ -492,7 +501,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
        if (!gs)
                return ERR_PTR(-ENOMEM);
 
-       sock = geneve_create_sock(net, ipv6, port);
+       sock = geneve_create_sock(net, ipv6, port, flags);
        if (IS_ERR(sock)) {
                kfree(gs);
                return ERR_CAST(sock);
@@ -575,12 +584,13 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
                goto out;
        }
 
-       gs = geneve_socket_create(net, geneve->dst_port, ipv6);
+       gs = geneve_socket_create(net, geneve->dst_port, ipv6, geneve->flags);
        if (IS_ERR(gs))
                return PTR_ERR(gs);
 
 out:
        gs->collect_md = geneve->collect_md;
+       gs->flags = geneve->flags;
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6)
                geneve->sock6 = gs;
@@ -642,11 +652,12 @@ static void geneve_build_header(struct genevehdr *geneveh,
 
 static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
                            __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-                           bool csum, bool xnet)
+                           u32 flags, bool xnet)
 {
        struct genevehdr *gnvh;
        int min_headroom;
        int err;
+       bool udp_sum = !!(flags & GENEVE_F_UDP_CSUM);
 
        skb_scrub_packet(skb, xnet);
 
@@ -658,7 +669,7 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
                goto free_rt;
        }
 
-       skb = udp_tunnel_handle_offloads(skb, csum);
+       skb = udp_tunnel_handle_offloads(skb, udp_sum);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto free_rt;
@@ -678,11 +689,12 @@ free_rt:
 #if IS_ENABLED(CONFIG_IPV6)
 static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
                             __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
-                            bool csum, bool xnet)
+                            u32 flags, bool xnet)
 {
        struct genevehdr *gnvh;
        int min_headroom;
        int err;
+       bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM6_TX);
 
        skb_scrub_packet(skb, xnet);
 
@@ -694,7 +706,7 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
                goto free_dst;
        }
 
-       skb = udp_tunnel_handle_offloads(skb, csum);
+       skb = udp_tunnel_handle_offloads(skb, udp_sum);
        if (IS_ERR(skb)) {
                err = PTR_ERR(skb);
                goto free_dst;
@@ -824,9 +836,9 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        struct flowi4 fl4;
        __u8 tos, ttl;
        __be16 sport;
-       bool udp_csum;
        __be16 df;
        bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+       u32 flags = geneve->flags;
 
        if (geneve->collect_md) {
                if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
@@ -857,9 +869,13 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                if (key->tun_flags & TUNNEL_GENEVE_OPT)
                        opts = ip_tunnel_info_opts(info);
 
-               udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+               if (key->tun_flags & TUNNEL_CSUM)
+                       flags |= GENEVE_F_UDP_CSUM;
+               else
+                       flags &= ~GENEVE_F_UDP_CSUM;
+
                err = geneve_build_skb(rt, skb, key->tun_flags, vni,
-                                      info->options_len, opts, udp_csum, xnet);
+                                      info->options_len, opts, flags, xnet);
                if (unlikely(err))
                        goto err;
 
@@ -867,9 +883,8 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                ttl = key->ttl;
                df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
        } else {
-               udp_csum = false;
                err = geneve_build_skb(rt, skb, 0, geneve->vni,
-                                      0, NULL, udp_csum, xnet);
+                                      0, NULL, flags, xnet);
                if (unlikely(err))
                        goto err;
 
@@ -883,7 +898,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        err = udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
                                  tos, ttl, df, sport, geneve->dst_port,
                                  !net_eq(geneve->net, dev_net(geneve->dev)),
-                                 !udp_csum);
+                                 !(flags & GENEVE_F_UDP_CSUM));
 
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return NETDEV_TX_OK;
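
In collect_md mode the per-flow metadata wins over the device default: TUNNEL_CSUM in the tunnel key sets or clears GENEVE_F_UDP_CSUM in the local flags copy before the header is built. A condensed sketch of the override (hypothetical helper name):

	static u32 geneve_flags_for_key(u32 dev_flags, __be16 tun_flags)
	{
		if (tun_flags & TUNNEL_CSUM)
			return dev_flags | GENEVE_F_UDP_CSUM;
		return dev_flags & ~GENEVE_F_UDP_CSUM;
	}
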
@@ -912,8 +927,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        struct flowi6 fl6;
        __u8 prio, ttl;
        __be16 sport;
-       bool udp_csum;
        bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
+       u32 flags = geneve->flags;
 
        if (geneve->collect_md) {
                if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
@@ -942,19 +957,22 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                if (key->tun_flags & TUNNEL_GENEVE_OPT)
                        opts = ip_tunnel_info_opts(info);
 
-               udp_csum = !!(key->tun_flags & TUNNEL_CSUM);
+               if (key->tun_flags & TUNNEL_CSUM)
+                       flags |= GENEVE_F_UDP_CSUM;
+               else
+                       flags &= ~GENEVE_F_UDP_CSUM;
+
                err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
                                        info->options_len, opts,
-                                       udp_csum, xnet);
+                                       flags, xnet);
                if (unlikely(err))
                        goto err;
 
                prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
                ttl = key->ttl;
        } else {
-               udp_csum = false;
                err = geneve6_build_skb(dst, skb, 0, geneve->vni,
-                                       0, NULL, udp_csum, xnet);
+                                       0, NULL, flags, xnet);
                if (unlikely(err))
                        goto err;
 
@@ -966,7 +984,8 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        }
        err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
                                   &fl6.saddr, &fl6.daddr, prio, ttl,
-                                  sport, geneve->dst_port, !udp_csum);
+                                  sport, geneve->dst_port,
+                                  !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX));
 
        iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
        return NETDEV_TX_OK;
@@ -1099,6 +1118,9 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
        [IFLA_GENEVE_TOS]               = { .type = NLA_U8 },
        [IFLA_GENEVE_PORT]              = { .type = NLA_U16 },
        [IFLA_GENEVE_COLLECT_METADATA]  = { .type = NLA_FLAG },
+       [IFLA_GENEVE_UDP_CSUM]          = { .type = NLA_U8 },
+       [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
+       [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
 };
 
 static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1152,7 +1174,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
 static int geneve_configure(struct net *net, struct net_device *dev,
                            union geneve_addr *remote,
                            __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port,
-                           bool metadata)
+                           bool metadata, u32 flags)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1183,6 +1205,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
        geneve->tos = tos;
        geneve->dst_port = dst_port;
        geneve->collect_md = metadata;
+       geneve->flags = flags;
 
        t = geneve_find_dev(gn, dst_port, remote, geneve->vni,
                            &tun_on_same_port, &tun_collect_md);
@@ -1213,6 +1236,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
        bool metadata = false;
        union geneve_addr remote = geneve_remote_unspec;
        __u32 vni = 0;
+       u32 flags = 0;
 
        if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6])
                return -EINVAL;
@@ -1253,8 +1277,20 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
        if (data[IFLA_GENEVE_COLLECT_METADATA])
                metadata = true;
 
+       if (data[IFLA_GENEVE_UDP_CSUM] &&
+           nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
+               flags |= GENEVE_F_UDP_CSUM;
+
+       if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] &&
+           nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
+               flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
+
+       if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] &&
+           nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
+               flags |= GENEVE_F_UDP_ZERO_CSUM6_RX;
+
        return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port,
-                               metadata);
+                               metadata, flags);
 }
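
With the three attributes wired up, userspace can request the checksum behaviour at link creation time; the matching iproute2 keywords (presumably udpcsum, udp6zerocsumtx and udp6zerocsumrx, mirroring the existing VXLAN options) landed in the corresponding iproute2 release — the exact spellings are an assumption here, not part of this patch.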
 
 static void geneve_dellink(struct net_device *dev, struct list_head *head)
@@ -1273,6 +1309,9 @@ static size_t geneve_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) +  /* IFLA_GENEVE_TOS */
                nla_total_size(sizeof(__be16)) +  /* IFLA_GENEVE_PORT */
                nla_total_size(0) +      /* IFLA_GENEVE_COLLECT_METADATA */
+               nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */
+               nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */
+               nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */
                0;
 }
 
@@ -1309,6 +1348,14 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        goto nla_put_failure;
        }
 
+       if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
+                      !!(geneve->flags & GENEVE_F_UDP_CSUM)) ||
+           nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
+                      !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) ||
+           nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
+                      !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_RX)))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
@@ -1342,7 +1389,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
                return dev;
 
        err = geneve_configure(net, dev, &geneve_remote_unspec,
-                              0, 0, 0, htons(dst_port), true);
+                              0, 0, 0, htons(dst_port), true, 0);
        if (err) {
                free_netdev(dev);
                return ERR_PTR(err);
index 02bab9a7c9ff6e292e1c68856a8a34e860267b1b..059fc523160107239d72080cadb1f2e7c0d246df 100644 (file)
@@ -867,6 +867,14 @@ int netvsc_send(struct hv_device *device,
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;
 
+       /* Send control message directly without accessing msd (Multi-Send
+        * Data) field which may be changed during data packet processing.
+        */
+       if (!skb) {
+               cur_send = packet;
+               goto send_now;
+       }
+
        msdp = &net_device->msd[q_idx];
 
        /* batch packets in send buffer if possible */
@@ -939,6 +947,7 @@ int netvsc_send(struct hv_device *device,
                }
        }
 
+send_now:
        if (cur_send)
                ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
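
The rationale for the bypass: the per-queue msd (multi-send data) state is read and rewritten while data packets are batched, so a control message — recognisable by skb == NULL — that consulted it could race with in-flight data processing; jumping straight to send_now transmits it immediately without touching the batching state.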
 
index 9a863c6a6a33aa7883f6fa105fca917ae177e25c..fc8ad001bc949e894e7659301af65565a5a21046 100644 (file)
@@ -1138,9 +1138,15 @@ static const struct net_device_ops ppp_netdev_ops = {
        .ndo_get_stats64 = ppp_get_stats64,
 };
 
+static struct device_type ppp_type = {
+       .name = "ppp",
+};
+
 static void ppp_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ppp_netdev_ops;
+       SET_NETDEV_DEVTYPE(dev, &ppp_type);
+
        dev->hard_header_len = PPP_HDRLEN;
        dev->mtu = PPP_MRU;
        dev->addr_len = 0;
@@ -2720,8 +2726,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
        int ret = -ENOMEM;
        int i;
 
-       dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_UNKNOWN,
-                          ppp_setup);
+       dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
        if (!dev)
                goto out1;
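
Two user-visible effects here: SET_NETDEV_DEVTYPE makes PPP interfaces report DEVTYPE=ppp in their sysfs uevent so udev rules can match on device type, and switching alloc_netdev() to NET_NAME_ENUM records that the ppp%d names are kernel-enumerated rather than of unknown origin.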
 
index e73f138578468db3a83bd882856ce897a747f2c0..a20d688d259570467d83585962815f42be5bd38d 100644 (file)
@@ -586,6 +586,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
            pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
                pr_err("No usable DMA configuration\n");
+               pci_disable_device(pdev);
                return -EIO;
        }
 
index 05c37d6d4afef27cdb182dbb7621ee7ef4fd2490..c3e22523faf36b2b21aa83dae388d2f2abf17206 100644 (file)
@@ -1677,11 +1677,8 @@ static int ctcm_shutdown_device(struct ccwgroup_device *cgdev)
 
        ccw_device_set_offline(cgdev->cdev[1]);
        ccw_device_set_offline(cgdev->cdev[0]);
-
-       if (priv->channel[CTCM_READ])
-               channel_remove(priv->channel[CTCM_READ]);
-       if (priv->channel[CTCM_WRITE])
-               channel_remove(priv->channel[CTCM_WRITE]);
+       channel_remove(priv->channel[CTCM_READ]);
+       channel_remove(priv->channel[CTCM_WRITE]);
        priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL;
 
        return 0;
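
The dropped NULL checks are redundant because channel_remove() itself tolerates a NULL channel pointer and returns early in that case, so the unconditional calls are safe; that guard in channel_remove() is assumed from the rest of ctcm_main.c, not shown in this hunk.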
index 1766a20ebcb116d42a1b4173154da900260e0767..ec2e014e885c285b15f6256750d77706f9f00c94 100644 (file)
@@ -981,6 +981,10 @@ int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
                          int (*reply_cb)(struct qeth_card *,
                                          struct qeth_reply *, unsigned long),
                          void *);
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
+                                                enum qeth_ipa_funcs,
+                                                __u16, __u16,
+                                                enum qeth_prot_versions);
 int qeth_start_ipa_tx_checksum(struct qeth_card *);
 int qeth_set_rx_csum(struct qeth_card *, int);
 
index 31ac53fa5cee9b81185bdba5c047d12cdf28c17f..787153764120cc8db03ef577cb485ffa765b7735 100644 (file)
@@ -2684,8 +2684,6 @@ void qeth_print_status_message(struct qeth_card *card)
                        sprintf(card->info.mcl_level, "%02x%02x",
                                card->info.mcl_level[2],
                                card->info.mcl_level[3]);
-
-                       card->info.mcl_level[QETH_MCL_LENGTH] = 0;
                        break;
                }
                /* fallthrough */
@@ -5297,10 +5295,10 @@ static int qeth_setassparms_cb(struct qeth_card *card,
        return 0;
 }
 
-static struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
-                                                 enum qeth_ipa_funcs ipa_func,
-                                                 __u16 cmd_code, __u16 len,
-                                                 enum qeth_prot_versions prot)
+struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
+                                                enum qeth_ipa_funcs ipa_func,
+                                                __u16 cmd_code, __u16 len,
+                                                enum qeth_prot_versions prot)
 {
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
@@ -5319,6 +5317,7 @@ static struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 
        return iob;
 }
+EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
 
 int qeth_send_setassparms(struct qeth_card *card,
                          struct qeth_cmd_buffer *iob, __u16 len, long data,
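
Exporting qeth_get_setassparms_cmd() from the core module lets the L3 discipline drop its private copy of the same command builder; the matching deletion follows in qeth_l3_main.c below.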
index 8f1b091e173277a718c232bdbf707fb03db21945..80b1979e8d955f6022e7810a4b65a62f13ec1224 100644 (file)
@@ -1126,6 +1126,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        qeth_l2_request_initial_mac(card);
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
+       netif_carrier_off(card->dev);
        return register_netdev(card->dev);
 }
 
index 543960e96b42b362af6ee7d68bafba057042cc32..7c8c68c26540fdc3e58c8a8a32a17dd924af6db9 100644 (file)
@@ -1043,28 +1043,6 @@ static int qeth_l3_default_setassparms_cb(struct qeth_card *card,
        return 0;
 }
 
-static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
-       struct qeth_card *card, enum qeth_ipa_funcs ipa_func, __u16 cmd_code,
-       __u16 len, enum qeth_prot_versions prot)
-{
-       struct qeth_cmd_buffer *iob;
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 4, "getasscm");
-       iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
-
-       if (iob) {
-               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-               cmd->data.setassparms.hdr.assist_no = ipa_func;
-               cmd->data.setassparms.hdr.length = 8 + len;
-               cmd->data.setassparms.hdr.command_code = cmd_code;
-               cmd->data.setassparms.hdr.return_code = 0;
-               cmd->data.setassparms.hdr.seq_no = 0;
-       }
-
-       return iob;
-}
-
 #ifdef CONFIG_QETH_IPV6
 static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
                enum qeth_ipa_funcs ipa_func, __u16 cmd_code)
@@ -1073,7 +1051,7 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
        struct qeth_cmd_buffer *iob;
 
        QETH_CARD_TEXT(card, 4, "simassp6");
-       iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
+       iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       0, QETH_PROT_IPV6);
        if (!iob)
                return -ENOMEM;
@@ -2344,10 +2322,11 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
 
        QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
 
-       iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-                       IPA_CMD_ASS_ARP_QUERY_INFO,
-                       sizeof(struct qeth_arp_query_data) - sizeof(char),
-                       prot);
+       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+                                      IPA_CMD_ASS_ARP_QUERY_INFO,
+                                      sizeof(struct qeth_arp_query_data)
+                                               - sizeof(char),
+                                      prot);
        if (!iob)
                return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
@@ -2439,7 +2418,7 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                return -EOPNOTSUPP;
        }
 
-       iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
                                       IPA_CMD_ASS_ARP_ADD_ENTRY,
                                       sizeof(struct qeth_arp_cache_entry),
                                       QETH_PROT_IPV4);
@@ -2480,7 +2459,7 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
                return -EOPNOTSUPP;
        }
        memcpy(buf, entry, 12);
-       iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
                                       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
                                       12,
                                       QETH_PROT_IPV4);
@@ -2818,7 +2797,7 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb)
 {
        unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
                tcp_hdr(skb)->doff * 4;
-       int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
+       int tcpd_len = skb_headlen(skb) - (tcpd - (unsigned long)skb->data);
        int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd);
 
        elements += qeth_get_elements_for_frags(skb);
@@ -3220,6 +3199,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 
        SET_NETDEV_DEV(card->dev, &card->gdev->dev);
        netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+       netif_carrier_off(card->dev);
        return register_netdev(card->dev);
 }
 
index ac098b6b97bf95c48eef5f81fd0754b160a18da6..2fd7019f69dbf1e40fbf1a64cfb9b4ec39938f92 100644 (file)
@@ -502,6 +502,8 @@ struct mlx5_priv {
        struct mlx5_eswitch     *eswitch;
        struct mlx5_core_sriov  sriov;
        unsigned long           pci_dev_data;
+       struct mlx5_flow_root_namespace *root_ns;
+       struct mlx5_flow_root_namespace *fdb_root_ns;
 };
 
 enum mlx5_device_state {
diff --git a/include/linux/mlx5/flow_table.h b/include/linux/mlx5/flow_table.h
deleted file mode 100644 (file)
index 0f2a15c..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_FLOW_TABLE_H
-#define MLX5_FLOW_TABLE_H
-
-#include <linux/mlx5/driver.h>
-
-struct mlx5_flow_table_group {
-       u8      log_sz;
-       u8      match_criteria_enable;
-       u32     match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-struct mlx5_flow_destination {
-       enum mlx5_flow_destination_type type;
-       union {
-               u32                     tir_num;
-               void                    *ft;
-               u32                     vport_num;
-       };
-};
-
-void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
-                            u16 num_groups,
-                            struct mlx5_flow_table_group *group);
-void mlx5_destroy_flow_table(void *flow_table);
-int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
-                             void *match_criteria, void *flow_context,
-                             u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
-u32 mlx5_get_flow_table_id(void *flow_table);
-
-#endif /* MLX5_FLOW_TABLE_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
new file mode 100644 (file)
index 0000000..bc7ad01
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+enum mlx5_flow_namespace_type {
+       MLX5_FLOW_NAMESPACE_KERNEL,
+       MLX5_FLOW_NAMESPACE_FDB,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_rule;
+struct mlx5_flow_namespace;
+
+struct mlx5_flow_destination {
+       enum mlx5_flow_destination_type type;
+       union {
+               u32                     tir_num;
+               struct mlx5_flow_table  *ft;
+               u32                     vport_num;
+       };
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+                       enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+                      int prio,
+                      int num_flow_table_entries);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+                  u8 match_criteria_enable,
+                  u32 *match_criteria,
+                  u32 *match_value,
+                  u32 action,
+                  u32 flow_tag,
+                  struct mlx5_flow_destination *dest);
+void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
+
+#endif
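
A hedged usage sketch of the new flow steering API, for orientation only: the group inbox is sized and filled per mlx5_ifc, the destination type and action values are assumptions, and all error handling is elided.

	static int example_steering_setup(struct mlx5_core_dev *dev, u32 tirn,
					  u8 match_criteria_enable,
					  u32 *match_criteria, u32 *match_value)
	{
		struct mlx5_flow_destination dest = {
			.type	 = MLX5_FLOW_DESTINATION_TYPE_TIR,	/* assumed */
			.tir_num = tirn,
		};
		struct mlx5_flow_namespace *ns;
		struct mlx5_flow_table *ft;
		struct mlx5_flow_group *fg;
		struct mlx5_flow_rule *rule;
		u32 *group_in;

		group_in = kzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in),
				   GFP_KERNEL);

		ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
		ft = mlx5_create_flow_table(ns, 0 /* prio */, 1024);

		/* start/end_flow_index and match criteria set via MLX5_SET */
		fg = mlx5_create_flow_group(ft, group_in);

		rule = mlx5_add_flow_rule(ft, match_criteria_enable,
					  match_criteria, match_value,
					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, /* assumed */
					  MLX5_FS_DEFAULT_FLOW_TAG, &dest);

		kfree(group_in);
		return rule ? 0 : -EINVAL;	/* illustrative only */
	}
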
index f5d94495758a605c35b4bc8d81e41de673433e6b..131a2737cfa3c0ae739b7eca77d02017ab3103b6 100644 (file)
@@ -256,25 +256,27 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
 
 struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         ft_support[0x1];
-       u8         reserved_0[0x1f];
+       u8         reserved_0[0x2];
+       u8         flow_modify_en[0x1];
+       u8         reserved_1[0x1c];
 
-       u8         reserved_1[0x2];
+       u8         reserved_2[0x2];
        u8         log_max_ft_size[0x6];
-       u8         reserved_2[0x10];
+       u8         reserved_3[0x10];
        u8         max_ft_level[0x8];
 
-       u8         reserved_3[0x20];
+       u8         reserved_4[0x20];
 
-       u8         reserved_4[0x18];
+       u8         reserved_5[0x18];
        u8         log_max_ft_num[0x8];
 
-       u8         reserved_5[0x18];
+       u8         reserved_6[0x18];
        u8         log_max_destination[0x8];
 
-       u8         reserved_6[0x18];
+       u8         reserved_7[0x18];
        u8         log_max_flow[0x8];
 
-       u8         reserved_7[0x40];
+       u8         reserved_8[0x40];
 
        struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
 
@@ -2843,6 +2845,13 @@ struct mlx5_ifc_set_hca_cap_in_bits {
        union mlx5_ifc_hca_cap_union_bits capability;
 };
 
+enum {
+       MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION    = 0x0,
+       MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG  = 0x1,
+       MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST    = 0x2,
+       MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS    = 0x3
+};
+
 struct mlx5_ifc_set_fte_out_bits {
        u8         status[0x8];
        u8         reserved_0[0x18];
@@ -2867,11 +2876,14 @@ struct mlx5_ifc_set_fte_in_bits {
        u8         reserved_4[0x8];
        u8         table_id[0x18];
 
-       u8         reserved_5[0x40];
+       u8         reserved_5[0x18];
+       u8         modify_enable_mask[0x8];
+
+       u8         reserved_6[0x20];
 
        u8         flow_index[0x20];
 
-       u8         reserved_6[0xe0];
+       u8         reserved_7[0xe0];
 
        struct mlx5_ifc_flow_context_bits flow_context;
 };
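
The new modify_enable_mask byte in SET_FLOW_TABLE_ENTRY pairs with the flow_modify_en capability bit added above: when the device advertises in-place FTE modification, the mask selects which parts of an existing entry the command rewrites — action, flow tag, destination list or flow counters — instead of replacing the whole entry.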
index dad035c16d94f98255a441cec6a0a0641003c33c..343c13ac4f71f41f370b3bd51c8cd38c9ea75c2f 100644 (file)
@@ -144,17 +144,17 @@ struct knav_dma_cfg {
  * @psdata:                    Protocol specific
  */
 struct knav_dma_desc {
-       u32     desc_info;
-       u32     tag_info;
-       u32     packet_info;
-       u32     buff_len;
-       u32     buff;
-       u32     next_desc;
-       u32     orig_len;
-       u32     orig_buff;
-       u32     epib[KNAV_DMA_NUM_EPIB_WORDS];
-       u32     psdata[KNAV_DMA_NUM_PS_WORDS];
-       u32     pad[4];
+       __le32  desc_info;
+       __le32  tag_info;
+       __le32  packet_info;
+       __le32  buff_len;
+       __le32  buff;
+       __le32  next_desc;
+       __le32  orig_len;
+       __le32  orig_buff;
+       __le32  epib[KNAV_DMA_NUM_EPIB_WORDS];
+       __le32  psdata[KNAV_DMA_NUM_PS_WORDS];
+       __le32  pad[4];
 } ____cacheline_aligned;
 
 #if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
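
With the descriptor fields annotated __le32, the hardware layout is declared little-endian regardless of CPU byte order, and sparse will now flag any direct load or store; accesses go through the usual byte-order helpers, e.g. (illustrative, buf_dma is an assumed DMA address):

	u32 len = le32_to_cpu(desc->buff_len);

	desc->buff = cpu_to_le32(buf_dma);
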
index 9fcaedf994ee2ba5db20a0635c78ceed06f57cc1..10a16b5bd1c70b58ee6e02b8515725a93afc5d18 100644 (file)
@@ -165,7 +165,8 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
        csum = csum_sub(csum, csum_partial(ptr, start, 0));
 
        /* Set derived checksum in packet */
-       delta = csum_sub(csum_fold(csum), *psum);
+       delta = csum_sub((__force __wsum)csum_fold(csum),
+                        (__force __wsum)*psum);
        *psum = csum_fold(csum);
 
        return delta;
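
The __force casts are purely for sparse: csum_fold() returns a 16-bit __sum16 while csum_sub() is typed on 32-bit __wsum, so the widening is intentional and the computed delta is unchanged.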
index 5ad57375a99fdcfe8f8217fbbb6b52483fcd66a4..2be1dd5a103fbfb9cd707af6fad7c5633b8b784a 100644 (file)
@@ -462,6 +462,9 @@ enum {
        IFLA_GENEVE_PORT,       /* destination port */
        IFLA_GENEVE_COLLECT_METADATA,
        IFLA_GENEVE_REMOTE6,
+       IFLA_GENEVE_UDP_CSUM,
+       IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
+       IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
        __IFLA_GENEVE_MAX
 };
 #define IFLA_GENEVE_MAX        (__IFLA_GENEVE_MAX - 1)
index 5a8a797d50b74a8bb698760856c1849ff9bcc084..f2ece3c174a5b540b40492833048d6c8bd9f3f1c 100644 (file)
@@ -187,11 +187,31 @@ static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
        }
 }
 
+static int bpf_link(struct dentry *old_dentry, struct inode *dir,
+                   struct dentry *new_dentry)
+{
+       if (bpf_dname_reserved(new_dentry))
+               return -EPERM;
+
+       return simple_link(old_dentry, dir, new_dentry);
+}
+
+static int bpf_rename(struct inode *old_dir, struct dentry *old_dentry,
+                     struct inode *new_dir, struct dentry *new_dentry)
+{
+       if (bpf_dname_reserved(new_dentry))
+               return -EPERM;
+
+       return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
 static const struct inode_operations bpf_dir_iops = {
        .lookup         = simple_lookup,
        .mknod          = bpf_mkobj,
        .mkdir          = bpf_mkdir,
        .rmdir          = simple_rmdir,
+       .rename         = bpf_rename,
+       .link           = bpf_link,
        .unlink         = simple_unlink,
 };
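
Both new callbacks reuse the same name filter as mknod and mkdir, so reserved names cannot be created, linked or renamed into bpffs. For reference, the guard is assumed (from earlier in inode.c, not shown here) to look roughly like this, rejecting any name containing a dot:

	static bool bpf_dname_reserved(const struct dentry *dentry)
	{
		return strchr(dentry->d_name.name, '.');
	}
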
 
index 4f8f7927b4222a98f924c2d8ac906888fe07210a..4466273f59e18c248ab64498e844af69f6dc1906 100644 (file)
@@ -5790,7 +5790,7 @@ EXPORT_SYMBOL_GPL(cgroup_get_from_path);
 
 #if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
 
-spinlock_t cgroup_sk_update_lock;
+DEFINE_SPINLOCK(cgroup_sk_update_lock);
 static bool cgroup_sk_alloc_disabled __read_mostly;
 
 void cgroup_sk_alloc_disable(void)
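
The bare declaration left the lock zero-initialised, which is not a validly initialised spinlock on all configurations (CONFIG_DEBUG_SPINLOCK in particular expects the __SPIN_LOCK_UNLOCKED magic); DEFINE_SPINLOCK supplies exactly that static initialisation.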
index 5e9111da449d35a0d7ca24b6202785a9bd831fdb..7082fb79d876c4cbb08e3894df753be16399ea6b 100644 (file)
@@ -70,7 +70,7 @@
 #include <net/sock.h>
 #include <net/snmp.h>
 
-#include <net/af_ieee802154.h>
+#include <net/6lowpan.h>
 #include <net/firewire.h>
 #include <net/ipv6.h>
 #include <net/protocol.h>
@@ -1947,9 +1947,9 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
 
 static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
 {
-       if (dev->addr_len != IEEE802154_ADDR_LEN)
+       if (dev->addr_len != EUI64_ADDR_LEN)
                return -1;
-       memcpy(eui, dev->dev_addr, 8);
+       memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
        eui[0] ^= 2;
        return 0;
 }
@@ -2041,7 +2041,6 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
        case ARPHRD_IPGRE:
                return addrconf_ifid_gre(eui, dev);
        case ARPHRD_6LOWPAN:
-       case ARPHRD_IEEE802154:
                return addrconf_ifid_eui64(eui, dev);
        case ARPHRD_IEEE1394:
                return addrconf_ifid_ieee1394(eui, dev);
@@ -3066,7 +3065,6 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_FDDI) &&
            (dev->type != ARPHRD_ARCNET) &&
            (dev->type != ARPHRD_INFINIBAND) &&
-           (dev->type != ARPHRD_IEEE802154) &&
            (dev->type != ARPHRD_IEEE1394) &&
            (dev->type != ARPHRD_TUNNEL6) &&
            (dev->type != ARPHRD_6LOWPAN)) {
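
Since 802.15.4 devices are now exposed through ARPHRD_6LOWPAN interfaces, the IEEE802154-specific cases become dead code: every 6LoWPAN link type carries an 8-byte EUI-64 hardware address, so the generic EUI64_ADDR_LEN path covers them all.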
index 3ea4c98d94dcee8ae8d1ba826c9046cecce1bca2..ef50a94d3eb726f75d4202a7a15c7dd1f812a3aa 100644 (file)
@@ -1031,7 +1031,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
-       struct iucv_message txmsg;
+       struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
        int cmsg_done;
        long timeo;
@@ -2084,11 +2084,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
                return NET_RX_SUCCESS;
        }
 
-               /* write stuff from iucv_msg to skb cb */
-       if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
-               kfree_skb(skb);
-               return NET_RX_SUCCESS;
-       }
+       /* write stuff from iucv_msg to skb cb */
        skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
@@ -2119,6 +2115,20 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        char nullstring[8];
        int err = 0;
 
+       if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
+               WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
+                         (int)skb->len,
+                         (int)(ETH_HLEN + sizeof(struct af_iucv_trans_hdr)));
+               kfree_skb(skb);
+               return NET_RX_SUCCESS;
+       }
+       if (skb_headlen(skb) < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr)))
+               if (skb_linearize(skb)) {
+                       WARN_ONCE(1, "AF_IUCV skb_linearize failed, len=%d",
+                                 (int)skb->len);
+                       kfree_skb(skb);
+                       return NET_RX_SUCCESS;
+               }
        skb_pull(skb, ETH_HLEN);
        trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
        EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
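
Moving the length check into afiucv_hs_rcv() — and adding the linearization — matters because trans_hdr is dereferenced straight from skb->data right after skb_pull(): the full transport header must both exist and live in the linear area before any callback runs, which the old per-callback check in afiucv_hs_callback_rx() could not guarantee.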
index 67591aef9cae6fb203ef802272baad6e5b191000..cdd01e6416db43970a3c474382d66e53d71ca97f 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
        return en->labels * sizeof(struct mpls_shim_hdr);
 }
 
-int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int mpls_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct mpls_iptunnel_encap *tun_encap_info;
        struct mpls_shim_hdr *hdr;
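
Marking mpls_output() static is safe because the function is only ever referenced through the lwtunnel encap ops registered within this file, not by any external caller.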