Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Sat, 1 Aug 2015 06:52:20 +0000 (23:52 -0700)
committer David S. Miller <davem@davemloft.net>
Sat, 1 Aug 2015 06:52:20 +0000 (23:52 -0700)
Conflicts:
arch/s390/net/bpf_jit_comp.c
drivers/net/ethernet/ti/netcp_ethss.c
net/bridge/br_multicast.c
net/ipv4/ip_fragment.c

All four conflicts were cases of simple overlapping
changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
545 files changed:
Documentation/devicetree/bindings/net/ethernet.txt
Documentation/devicetree/bindings/net/keystone-netcp.txt
Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt [new file with mode: 0644]
Documentation/networking/ip-sysctl.txt
Documentation/networking/stmmac.txt
Documentation/networking/switchdev.txt
Documentation/networking/timestamping.txt
MAINTAINERS
arch/arm/net/bpf_jit_32.c
arch/arm/net/bpf_jit_32.h
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/sparc/net/bpf_jit_comp.c
arch/x86/net/bpf_jit_comp.c
drivers/bcma/main.c
drivers/bluetooth/Kconfig
drivers/bluetooth/bfusb.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btintel.h
drivers/bluetooth/btmrvl_drv.h
drivers/bluetooth/btusb.c
drivers/bluetooth/dtl1_cs.c
drivers/bluetooth/hci_h5.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/infiniband/hw/mlx4/cq.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_sysfs.c
drivers/net/dsa/Kconfig
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mv88e6352.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/dsa/mv88e6xxx.h
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/ec_bhf.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/gianfar.h
drivers/net/ethernet/freescale/gianfar_ethtool.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hip04_mdio.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmveth.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_diag.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.h
drivers/net/ethernet/intel/i40e/i40e_hmc.c
drivers/net/ethernet/intel/i40e/i40e_hmc.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_prototype.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/defines.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/Kconfig
drivers/net/ethernet/mellanox/Makefile
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.h
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/Kconfig [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/Makefile [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/cmd.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/emad.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/item.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/pci.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/pci.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/port.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/reg.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/switchx2.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/trap.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/txheader.h [new file with mode: 0644]
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/s2io.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/rocker/rocker.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
drivers/net/ethernet/synopsys/Kconfig [new file with mode: 0644]
drivers/net/ethernet/synopsys/Makefile [new file with mode: 0644]
drivers/net/ethernet/synopsys/dwc_eth_qos.c [new file with mode: 0644]
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/aquantia.c [new file with mode: 0644]
drivers/net/phy/dp83640.c
drivers/net/phy/dp83867.c
drivers/net/phy/fixed_phy.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-octeon.c
drivers/net/phy/phy.c
drivers/net/phy/spi_ks8995.c
drivers/net/phy/teranetics.c [new file with mode: 0644]
drivers/net/usb/Kconfig
drivers/net/usb/Makefile
drivers/net/usb/lan78xx.c [new file with mode: 0644]
drivers/net/usb/lan78xx.h [new file with mode: 0644]
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/Makefile
drivers/net/wireless/ath/ath10k/bmi.h
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/htt.c
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/swap.c [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/swap.h [new file with mode: 0644]
drivers/net/wireless/ath/ath10k/targaddrs.h
drivers/net/wireless/ath/ath10k/txrx.c
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath9k/ar9003_phy.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/dfs.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/dfs_pri_detector.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/b43/lo.c
drivers/net/wireless/b43/lo.h
drivers/net/wireless/b43/phy_g.c
drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/brcm80211/brcmfmac/core.h
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/cw1200/cw1200_spi.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/iwlegacy/3945-mac.c
drivers/net/wireless/iwlegacy/debug.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/decl.h
drivers/net/wireless/mwifiex/fw.h
drivers/net/wireless/mwifiex/ie.c
drivers/net/wireless/mwifiex/init.c
drivers/net/wireless/mwifiex/join.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/main.h
drivers/net/wireless/mwifiex/scan.c
drivers/net/wireless/mwifiex/sta_cmd.c
drivers/net/wireless/mwifiex/sta_cmdresp.c
drivers/net/wireless/mwifiex/sta_event.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_cmd.c
drivers/net/wireless/mwifiex/uap_event.c
drivers/net/wireless/mwifiex/usb.c
drivers/net/wireless/mwifiex/util.c
drivers/net/wireless/mwifiex/wmm.c
drivers/net/wireless/mwifiex/wmm.h
drivers/net/wireless/rtlwifi/rtl8192cu/def.h
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
drivers/net/wireless/ti/wlcore/rx.c
drivers/net/wireless/ti/wlcore/rx.h
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/xen-netback/common.h
drivers/of/of_mdio.c
include/linux/bpf.h
include/linux/filter.h
include/linux/ipv6.h
include/linux/mlx4/cq.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mpls_iptunnel.h [new file with mode: 0644]
include/linux/netdevice.h
include/linux/phy.h
include/linux/skbuff.h
include/linux/stmmac.h
include/net/act_api.h
include/net/addrconf.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bond_options.h
include/net/cfg802154.h
include/net/cls_cgroup.h
include/net/dst.h
include/net/dst_metadata.h [new file with mode: 0644]
include/net/fib_rules.h
include/net/flow.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ipv6.h
include/net/lwtunnel.h [new file with mode: 0644]
include/net/mac802154.h
include/net/mpls_iptunnel.h [new file with mode: 0644]
include/net/netns/ipv6.h
include/net/route.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_gact.h
include/net/tc_act/tc_mirred.h
include/net/tcp.h
include/net/timewait_sock.h
include/net/vxlan.h
include/uapi/linux/Kbuild
include/uapi/linux/bpf.h
include/uapi/linux/ethtool.h
include/uapi/linux/fib_rules.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/ipv6.h
include/uapi/linux/lwtunnel.h [new file with mode: 0644]
include/uapi/linux/mpls_iptunnel.h [new file with mode: 0644]
include/uapi/linux/openvswitch.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/snmp.h
kernel/bpf/core.c
kernel/bpf/verifier.c
lib/test_bpf.c
lib/test_rhashtable.c
net/6lowpan/iphc.c
net/Kconfig
net/atm/br2684.c
net/bluetooth/6lowpan.c
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/a2mp.c
net/bluetooth/a2mp.h
net/bluetooth/amp.c
net/bluetooth/amp.h
net/bluetooth/cmtp/capi.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bridge/br_if.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/core/Makefile
net/core/dev.c
net/core/dst.c
net/core/fib_rules.c
net/core/filter.c
net/core/flow_dissector.c
net/core/lwtunnel.c [new file with mode: 0644]
net/core/net-sysfs.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/timestamping.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/tag_brcm.c
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_trailer.c
net/ieee802154/rdev-ops.h
net/ieee802154/sysfs.c
net/ieee802154/trace.h
net/ipv4/af_inet.c
net/ipv4/arp.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/fib_semantics.c
net/ipv4/icmp.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/route.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cdg.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_hybla.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_scalable.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/exthdrs.c
net/ipv6/icmp.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ndisc.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/mac802154/cfg.c
net/mac802154/ieee802154_i.h
net/mac802154/iface.c
net/mac802154/main.c
net/mac802154/rx.c
net/mac802154/tx.c
net/mac802154/util.c
net/mpls/Kconfig
net/mpls/Makefile
net/mpls/af_mpls.c
net/mpls/internal.h
net/mpls/mpls_iptunnel.c [new file with mode: 0644]
net/netfilter/nft_meta.c
net/netfilter/xt_TPROXY.c
net/openvswitch/Makefile
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/dp_notify.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/flow_table.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-netdev.c
net/openvswitch/vport-netdev.h
net/openvswitch/vport-vxlan.c
net/openvswitch/vport-vxlan.h [deleted file]
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/cls_cgroup.c
net/sched/sch_qfq.c
net/sctp/protocol.c
net/sctp/sm_statefuns.c
net/switchdev/switchdev.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/core.h
net/tipc/discover.c
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tipc/socket.h
net/tipc/udp_media.c
net/xfrm/xfrm_user.c
samples/bpf/bpf_helpers.h
samples/bpf/test_verifier.c
samples/bpf/tracex1_kern.c
samples/bpf/tracex2_kern.c
samples/bpf/tracex3_kern.c
samples/bpf/tracex4_kern.c
samples/bpf/tracex5_kern.c
tools/net/bpf_jit_disasm.c

diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
index 41b3f3f864e84d7f6941bb61280870d70ea687b7..5d88f37480b6a75e13f61e782829bf9babbf46a9 100644 (file)
@@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
   flow control thresholds.
 - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
   is used for components that can have configurable fifo sizes.
+- managed: string, specifies the PHY management type. Supported values are:
+  "auto", "in-band-status". "auto" is the default, it usess MDIO for
+  management if fixed-link is not specified.
 
 Child nodes of the Ethernet controller are typically the individual PHY devices
 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
 They are described in the phy.txt file in this same directory.
+For non-MDIO PHY management see fixed-link.txt.
diff --git a/Documentation/devicetree/bindings/net/keystone-netcp.txt b/Documentation/devicetree/bindings/net/keystone-netcp.txt
index d0e6fa38f335fcfa10f0840221bc663e4a5c9f31..b30ab6b5cbfa9f0f3666165c1fb9641a8d375fb5 100644 (file)
@@ -130,7 +130,11 @@ Required properties:
 
 Optional properties:
 - efuse-mac:   If this is 1, then the MAC address for the interface is
-               obtained from the device efuse mac address register
+               obtained from the device efuse mac address register.
+               If this is 2, the two DWORDs occupied by the MAC address
+               are swapped.  When this property is set to 2, the netcp
+               driver will swap the two DWORDs back to the proper order
+               when it obtains the MAC address from efuse.
 - local-mac-address:   the driver is designed to use the of_get_mac_address api
                        only if efuse-mac is 0. When efuse-mac is 0, the MAC
                        address is obtained from local-mac-address. If this
diff --git a/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt b/Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
new file mode 100644 (file)
index 0000000..51f8d2e
--- /dev/null
@@ -0,0 +1,75 @@
+* Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
+
+
+Required properties:
+- compatible: Should be "snps,dwc-qos-ethernet-4.10"
+- reg: Address and length of the register set for the device
+- clocks: Phandles to the reference clock and the bus clock
+- clock-names: Should be "phy_ref_clk" for the reference clock and "apb_pclk"
+  for the bus clock.
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the core's combined interrupt signal
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+- mac-address: See ethernet.txt in the same directory
+- local-mac-address: See ethernet.txt in the same directory
+- snps,en-lpi: If present it enables use of the AXI low-power interface
+- snps,write-requests: Number of write requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,read-requests: Number of read requests that the AXI port can issue.
+  It depends on the SoC configuration.
+- snps,burst-map: Bitmap of allowed AXI burst lengths, with the LSB
+  representing 4, then 8 etc.
+- snps,txpbl: DMA Programmable burst length for the TX DMA
+- snps,rxpbl: DMA Programmable burst length for the RX DMA
+- snps,en-tx-lpi-clockgating: Enable gating of the MAC TX clock during
+  TX low-power mode.
+- phy-handle: See ethernet.txt file in the same directory
+- mdio device tree subnode: When the GMAC has a phy connected to its local
+    mdio, there must be a device tree subnode with the following
+    required properties:
+    - compatible: Must be "snps,dwc-qos-ethernet-mdio".
+    - #address-cells: Must be <1>.
+    - #size-cells: Must be <0>.
+
+    For each phy on the mdio bus, there must be a node with the following
+    fields:
+
+    - reg: phy id used to communicate to phy.
+    - device_type: Must be "ethernet-phy".
+    - fixed-mode device tree subnode: see fixed-link.txt in the same directory
+
+Examples:
+ethernet2@40010000 {
+       clock-names = "phy_ref_clk", "apb_pclk";
+       clocks = <&clkc 17>, <&clkc 15>;
+       compatible = "snps,dwc-qos-ethernet-4.10";
+       interrupt-parent = <&intc>;
+       interrupts = <0x0 0x1e 0x4>;
+       reg = <0x40010000 0x4000>;
+       phy-handle = <&phy2>;
+       phy-mode = "gmii";
+
+       snps,en-tx-lpi-clockgating;
+       snps,en-lpi;
+       snps,write-requests = <2>;
+       snps,read-requests = <16>;
+       snps,burst-map = <0x7>;
+       snps,txpbl = <8>;
+       snps,rxpbl = <2>;
+
+       dma-coherent;
+
+       mdio {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               phy2: phy@1 {
+                       compatible = "ethernet-phy-ieee802.3-c22";
+                       device_type = "ethernet-phy";
+                       reg = <0x1>;
+               };
+       };
+};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5fae7704daab292cf900158666c2d4bb80dd2424..56db1efd7189ac6b25fdfc4a18b579f28e01a4b7 100644 (file)
@@ -1215,14 +1215,20 @@ flowlabel_consistency - BOOLEAN
        FALSE: disabled
        Default: TRUE
 
-auto_flowlabels - BOOLEAN
-       Automatically generate flow labels based based on a flow hash
-       of the packet. This allows intermediate devices, such as routers,
-       to idenfify packet flows for mechanisms like Equal Cost Multipath
+auto_flowlabels - INTEGER
+       Automatically generate flow labels based on a flow hash of the
+       packet. This allows intermediate devices, such as routers, to
+       identify packet flows for mechanisms like Equal Cost Multipath
        Routing (see RFC 6438).
-       TRUE: enabled
-       FALSE: disabled
-       Default: false
+       0: automatic flow labels are completely disabled
+       1: automatic flow labels are enabled by default, they can be
+          disabled on a per socket basis using the IPV6_AUTOFLOWLABEL
+          socket option
+       2: automatic flow labels are allowed, they may be enabled on a
+          per socket basis using the IPV6_AUTOFLOWLABEL socket option
+       3: automatic flow labels are enabled and enforced, they cannot
+          be disabled by the socket option
+       Default: 1
 
 flowlabel_state_ranges - BOOLEAN
        Split the flow label number space into two ranges. 0-0x7FFFF is
@@ -1340,6 +1346,14 @@ accept_ra_from_local - BOOLEAN
           disabled if accept_ra_from_local is disabled
                on a specific interface.
 
+accept_ra_min_hop_limit - INTEGER
+       Minimum hop limit Information in Router Advertisement.
+
+       Hop limit Information in Router Advertisement less than this
+       variable shall be ignored.
+
+       Default: 1
+
 accept_ra_pinfo - BOOLEAN
        Learn Prefix Information in Router Advertisement.
 
@@ -1435,6 +1449,11 @@ mtu - INTEGER
        Default Maximum Transfer Unit
        Default: 1280 (IPv6 required minimum)
 
+ip_nonlocal_bind - BOOLEAN
+       If set, allows processes to bind() to non-local IPv6 addresses,
+       which can be quite useful - but may break some applications.
+       Default: 0
+
 router_probe_interval - INTEGER
        Minimum interval (in seconds) between Router Probing described
        in RFC4191.
@@ -1455,6 +1474,13 @@ router_solicitations - INTEGER
        routers are present.
        Default: 3
 
+use_oif_addrs_only - BOOLEAN
+       When enabled, the candidate source addresses for destinations
+       routed via this interface are restricted to the set of addresses
+       configured on this interface (vis. RFC 6724, section 4).
+
+       Default: false
+
 use_tempaddr - INTEGER
        Preference for Privacy Extensions (RFC3041).
          <= 0 : disable Privacy Extensions
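The auto_flowlabels modes above pair with the per-socket IPV6_AUTOFLOWLABEL option: mode 1 lets an application opt out, mode 2 lets it opt in, and mode 3 ignores the option entirely. A minimal userspace sketch of the per-socket side follows; it is illustrative only and not part of this merge, and the fallback #define simply mirrors the uapi value in case the libc headers predate it.

/* Illustrative only: toggling automatic IPv6 flow labels per socket. */
#include <stdio.h>
#include <unistd.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPV6_AUTOFLOWLABEL
#define IPV6_AUTOFLOWLABEL 70	/* fallback, from include/uapi/linux/in6.h */
#endif

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	int on = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* With net.ipv6.auto_flowlabels = 2 this opts the socket in;
	 * with = 1, setting the option to 0 would opt the socket out;
	 * with = 3 the kernel sets flow labels regardless.
	 */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL,
		       &on, sizeof(on)) < 0)
		perror("setsockopt(IPV6_AUTOFLOWLABEL)");

	close(fd);
	return 0;
}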
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index e655e2453c9842e8bc9d28cce3b61e79a54c1fbf..2903b1cf4d702c639fe2d43056fcf9dca1a641d9 100644 (file)
@@ -135,12 +135,8 @@ struct plat_stmmacenet_data {
        int maxmtu;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
-       void *custom_cfg;
-       void *custom_data;
        void *bsp_priv;
 };
 
@@ -179,15 +175,11 @@ Where:
  o bus_setup: perform HW setup of the bus. For example, on some ST platforms
             this field is used to configure the AMBA  bridge to generate more
             efficient STBus traffic.
- o setup/init/exit: callbacks used for calling a custom initialization;
+ o init/exit: callbacks used for calling a custom initialization;
             this is sometime necessary on some platforms (e.g. ST boxes)
             where the HW needs to have set some PIO lines or system cfg
-            registers. setup should return a pointer to private data,
-            which will be stored in bsp_priv, and then passed to init and
-            exit callbacks. init/exit callbacks should not use or modify
+            registers.  init/exit callbacks should not use or modify
             platform data.
- o custom_cfg/custom_data: this is a custom configuration that can be passed
-                          while initializing the resources.
  o bsp_priv: another private pointer.
 
 For MDIO bus The we have:
@@ -278,8 +270,6 @@ capability register can replace what has been passed from the platform.
 Please see the following document:
        Documentation/devicetree/bindings/net/stmmac.txt
 
-and the stmmac_of_data structure inside the include/linux/stmmac.h header file.
-
 4.11) This is a summary of the content of some relevant files:
  o stmmac_main.c: to implement the main network device driver;
  o stmmac_mdio.c: to provide mdio functions;
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt
index c5d7ade10ff21b720c0c41e7fb9f4106a11ada09..9825f32a86349c1469a114d0aa85318115fac1e0 100644 (file)
@@ -279,8 +279,18 @@ and unknown unicast packets to all ports in domain, if allowed by port's
 current STP state.  The switch driver, knowing which ports are within which
 vlan L2 domain, can program the switch device for flooding.  The packet should
 also be sent to the port netdev for processing by the bridge driver.  The
-bridge should not reflood the packet to the same ports the device flooded.
-XXX: the mechanism to avoid duplicate flood packets is being discuseed.
+bridge should not reflood the packet to the same ports the device flooded,
+otherwise there will be duplicate packets on the wire.
+
+To avoid duplicate packets, the device/driver should mark a packet as already
+forwarded using skb->offload_fwd_mark.  The same mark is set on the device
+ports in the domain using dev->offload_fwd_mark.  If the skb->offload_fwd_mark
+is non-zero and matches the forwarding egress port's dev->offload_fwd_mark,
+the kernel will drop the skb right before transmit on the egress port, with
+the understanding that the device already forwarded the packet on that port.
+The driver can use switchdev_port_fwd_mark_set() to set a globally unique mark
+for port's dev->offload_fwd_mark, based on the port's parent ID (switch ID) and
+a group ifindex.
 
 It is possible for the switch device to not handle flooding and push the
 packets up to the bridge driver for flooding.  This is not ideal as the number
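To make the mark comparison above concrete, the following is a stripped-down, self-contained model of the drop decision only; the structure layouts are simplified stand-ins, not the kernel's struct sk_buff and struct net_device, and the real check sits in the kernel transmit path.

/* Standalone model of the offload_fwd_mark check described above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct model_netdev {
	uint32_t offload_fwd_mark;	/* shared by ports of one switch domain */
};

struct model_skb {
	uint32_t offload_fwd_mark;	/* set by the switch driver on receive */
};

/* Return true if the packet should be dropped before transmit because
 * the switch device already forwarded it out this egress port.
 */
static bool already_offload_forwarded(const struct model_skb *skb,
				      const struct model_netdev *egress)
{
	return skb->offload_fwd_mark &&
	       skb->offload_fwd_mark == egress->offload_fwd_mark;
}

int main(void)
{
	struct model_netdev swp1 = { .offload_fwd_mark = 0x1 };
	struct model_skb skb = { .offload_fwd_mark = 0x1 };

	printf("drop before tx: %s\n",
	       already_offload_forwarded(&skb, &swp1) ? "yes" : "no");
	return 0;
}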
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 5f0922613f1a8db7963fe3c44d21280d7290d84e..a977339fbe0a50d94877fdb8d9f2e8d902699612 100644 (file)
@@ -359,6 +359,13 @@ the requested fine-grained filtering for incoming packets is not
 supported, the driver may time stamp more than just the requested types
 of packets.
 
+Drivers are free to use a more permissive configuration than the requested
+configuration. It is expected that drivers should only implement directly the
+most generic mode that can be supported. For example if the hardware can
+support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+is more generic (and more useful to applications).
+
 A driver which supports hardware time stamping shall update the struct
 with the actual, possibly more permissive configuration. If the
 requested packets cannot be time stamped, then nothing should be
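From an application's point of view, such upscaling is visible in the hwtstamp_config that SIOCSHWTSTAMP writes back: request a narrow filter and read what the driver actually granted. A small userspace sketch follows; "eth0" is a placeholder interface name and the program is illustrative, not part of this merge.

/* Ask for PTPv2 L2 sync filtering and print the configuration the driver
 * actually enabled; it may report a more generic filter such as
 * HWTSTAMP_FILTER_PTP_V2_EVENT or HWTSTAMP_FILTER_ALL.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("granted rx_filter = %d, tx_type = %d\n",
		       cfg.rx_filter, cfg.tx_type);

	close(fd);
	return 0;
}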
diff --git a/MAINTAINERS b/MAINTAINERS
index 9289ecb57b68d17b612cdc3eb932627b6ac426d7..98ede02a96f2bf5666ab2cf14ef4caceb0fdf9cb 100644 (file)
@@ -6506,7 +6506,7 @@ F:        drivers/net/ethernet/marvell/mvneta.*
 
 MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <akarwar@marvell.com>
-M:     Avinash Patil <patila@marvell.com>
+M:     Nishant Sarmukadam <nishants@marvell.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/mwifiex/
@@ -6644,6 +6644,15 @@ W:       http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
 F:     drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET SWITCH DRIVERS
+M:     Jiri Pirko <jiri@mellanox.com>
+M:     Ido Schimmel <idosch@mellanox.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+W:     http://www.mellanox.com
+Q:     http://patchwork.ozlabs.org/project/netdev/list/
+F:     drivers/net/ethernet/mellanox/mlxsw/
+
 MEMORY MANAGEMENT
 L:     linux-mm@kvack.org
 W:     http://www.linux-mm.org
@@ -8908,6 +8917,13 @@ F:       include/linux/dma/dw.h
 F:     include/linux/platform_data/dma-dw.h
 F:     drivers/dma/dw/
 
+SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
+M:     Lars Persson <lars.persson@axis.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+F:     drivers/net/ethernet/synopsys/dwc_eth_qos.c
+
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
 M:     Seungwon Jeon <tgih.jun@samsung.com>
 M:     Jaehoon Chung <jh80.chung@samsung.com>
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c011e2296cb1e681863338972c6ce79f836bb6be..876060bcceeb3ea989e24fe18b42910f3cce4058 100644 (file)
@@ -857,7 +857,9 @@ b_epilogue:
                        emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
                        break;
                case BPF_ANC | SKF_AD_IFINDEX:
+               case BPF_ANC | SKF_AD_HATYPE:
                        /* A = skb->dev->ifindex */
+                       /* A = skb->dev->type */
                        ctx->seen |= SEEN_SKB;
                        off = offsetof(struct sk_buff, dev);
                        emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
@@ -867,8 +869,24 @@ b_epilogue:
 
                        BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
                                                  ifindex) != 4);
-                       off = offsetof(struct net_device, ifindex);
-                       emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
+                                                 type) != 2);
+
+                       if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+                               off = offsetof(struct net_device, ifindex);
+                               emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
+                       } else {
+                               /*
+                                * offset of field "type" in "struct
+                                * net_device" is above what can be
+                                * used in the ldrh rd, [rn, #imm]
+                                * instruction, so load the offset in
+                                * a register and use ldrh rd, [rn, rm]
+                                */
+                               off = offsetof(struct net_device, type);
+                               emit_mov_i(ARM_R3, off, ctx);
+                               emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx);
+                       }
                        break;
                case BPF_ANC | SKF_AD_MARK:
                        ctx->seen |= SEEN_SKB;
@@ -895,6 +913,17 @@ b_epilogue:
                                OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
                        }
                        break;
+               case BPF_ANC | SKF_AD_PKTTYPE:
+                       ctx->seen |= SEEN_SKB;
+                       BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+                                                 __pkt_type_offset[0]) != 1);
+                       off = PKT_TYPE_OFFSET();
+                       emit(ARM_LDRB_I(r_A, r_skb, off), ctx);
+                       emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+                       emit(ARM_LSR_I(r_A, r_A, 5), ctx);
+#endif
+                       break;
                case BPF_ANC | SKF_AD_QUEUE:
                        ctx->seen |= SEEN_SKB;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -904,6 +933,14 @@ b_epilogue:
                        off = offsetof(struct sk_buff, queue_mapping);
                        emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
                        break;
+               case BPF_ANC | SKF_AD_PAY_OFFSET:
+                       ctx->seen |= SEEN_SKB | SEEN_CALL;
+
+                       emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
+                       emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx);
+                       emit_blx_r(ARM_R3, ctx);
+                       emit(ARM_MOV_R(r_A, ARM_R0), ctx);
+                       break;
                case BPF_LDX | BPF_W | BPF_ABS:
                        /*
                         * load a 32bit word from struct seccomp_data.
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index b2d7d92859d37e11c31f54c751a1cb78c419713b..4b17d5ab652a4029e7cc73165075a75068e0ef6c 100644 (file)
@@ -74,6 +74,7 @@
 #define ARM_INST_LDRB_I                0x05d00000
 #define ARM_INST_LDRB_R                0x07d00000
 #define ARM_INST_LDRH_I                0x01d000b0
+#define ARM_INST_LDRH_R                0x019000b0
 #define ARM_INST_LDR_I         0x05900000
 
 #define ARM_INST_LDM           0x08900000
                                 | (rm))
 #define ARM_LDRH_I(rt, rn, off)        (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
                                 | (((off) & 0xf0) << 4) | ((off) & 0xf))
+#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \
+                                | (rm))
 
 #define ARM_LDM(rn, regs)      (ARM_INST_LDM | (rn) << 16 | (regs))
 
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f6498eec9ee17baa66d63a3c71db729ed6f7a3da..f010c93a88b16c1d909069c14237ff389fa759df 100644 (file)
@@ -36,6 +36,8 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  *           |   BPF stack   |     |
  *           |               |     |
  *           +---------------+     |
+ *           | 8 byte skbp   |     |
+ * R15+170 -> +---------------+     |
  *           | 8 byte hlen   |     |
  * R15+168 -> +---------------+     |
  *           | 4 byte align  |     |
@@ -51,11 +53,12 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  * We get 160 bytes stack space from calling function, but only use
  * 12 * 8 byte for old backchain, r15..r6, and tail_call_cnt.
  */
-#define STK_SPACE      (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_SPACE      (MAX_BPF_STACK + 8 + 8 + 4 + 4 + 160)
 #define STK_160_UNUSED (160 - 12 * 8)
 #define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
+#define STK_OFF_SKBP   170     /* Offset of SKB pointer on stack */
 
 #define STK_OFF_R6     (160 - 11 * 8)  /* Offset of r6 on stack */
 #define STK_OFF_TCCNT  (160 - 12 * 8)  /* Offset of tail_call_cnt on stack */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 8d2e5165865f2c27b50fe8b1c6845de45666698a..9f4bbc09bf07b634092aede3058d89226ef60f97 100644 (file)
@@ -45,7 +45,7 @@ struct bpf_jit {
        int labels[1];          /* Labels for local jumps */
 };
 
-#define BPF_SIZE_MAX   4096    /* Max size for program */
+#define BPF_SIZE_MAX   0x7ffff /* Max size for program (20 bit signed displ) */
 
 #define SEEN_SKB       1       /* skb access */
 #define SEEN_MEM       2       /* use mem[] for temporary storage */
@@ -53,6 +53,7 @@ struct bpf_jit {
 #define SEEN_LITERAL   8       /* code uses literals */
 #define SEEN_FUNC      16      /* calls C functions */
 #define SEEN_TAIL_CALL 32      /* code uses tail calls */
+#define SEEN_SKB_CHANGE        64      /* code changes skb data */
 #define SEEN_STACK     (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
 
 /*
@@ -203,19 +204,11 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
        _EMIT6(op1 | __disp, op2);                              \
 })
 
-#define EMIT6_DISP(op1, op2, b1, b2, b3, disp)                 \
-({                                                             \
-       _EMIT6_DISP(op1 | reg(b1, b2) << 16 |                   \
-                   reg_high(b3) << 8, op2, disp);              \
-       REG_SET_SEEN(b1);                                       \
-       REG_SET_SEEN(b2);                                       \
-       REG_SET_SEEN(b3);                                       \
-})
-
 #define _EMIT6_DISP_LH(op1, op2, disp)                         \
 ({                                                             \
-       unsigned int __disp_h = ((u32)disp) & 0xff000;          \
-       unsigned int __disp_l = ((u32)disp) & 0x00fff;          \
+       u32 _disp = (u32) disp;                                 \
+       unsigned int __disp_h = _disp & 0xff000;                \
+       unsigned int __disp_l = _disp & 0x00fff;                \
        _EMIT6(op1 | __disp_l, op2 | __disp_h >> 4);            \
 })
 
@@ -389,13 +382,33 @@ static void save_restore_regs(struct bpf_jit *jit, int op)
        } while (re <= 15);
 }
 
+/*
+ * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
+ * we store the SKB header length on the stack and the SKB data
+ * pointer in REG_SKB_DATA.
+ */
+static void emit_load_skb_data_hlen(struct bpf_jit *jit)
+{
+       /* Header length: llgf %w1,<len>(%b1) */
+       EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
+                     offsetof(struct sk_buff, len));
+       /* s %w1,<data_len>(%b1) */
+       EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
+                  offsetof(struct sk_buff, data_len));
+       /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+       EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15, STK_OFF_HLEN);
+       /* lg %skb_data,data_off(%b1) */
+       EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+                     BPF_REG_1, offsetof(struct sk_buff, data));
+}
+
 /*
  * Emit function prologue
  *
  * Save registers and create stack frame if necessary.
  * See stack frame layout desription in "bpf_jit.h"!
  */
-static void bpf_jit_prologue(struct bpf_jit *jit)
+static void bpf_jit_prologue(struct bpf_jit *jit, bool is_classic)
 {
        if (jit->seen & SEEN_TAIL_CALL) {
                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
@@ -429,32 +442,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
                        EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
-       /*
-        * For SKB access %b1 contains the SKB pointer. For "bpf_jit.S"
-        * we store the SKB header length on the stack and the SKB data
-        * pointer in REG_SKB_DATA.
-        */
-       if (jit->seen & SEEN_SKB) {
-               /* Header length: llgf %w1,<len>(%b1) */
-               EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_1,
-                             offsetof(struct sk_buff, len));
-               /* s %w1,<data_len>(%b1) */
-               EMIT4_DISP(0x5b000000, REG_W1, BPF_REG_1,
-                          offsetof(struct sk_buff, data_len));
-               /* stg %w1,ST_OFF_HLEN(%r0,%r15) */
+       if (jit->seen & SEEN_SKB)
+               emit_load_skb_data_hlen(jit);
+       if (jit->seen & SEEN_SKB_CHANGE)
+               /* stg %b1,ST_OFF_SKBP(%r0,%r15) */
                EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
-                             STK_OFF_HLEN);
-               /* lg %skb_data,data_off(%b1) */
-               EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
-                             BPF_REG_1, offsetof(struct sk_buff, data));
+                             STK_OFF_SKBP);
+       /* Clear A (%b0) and X (%b7) registers for converted BPF programs */
+       if (is_classic) {
+               if (REG_SEEN(BPF_REG_A))
+                       /* lghi %ba,0 */
+                       EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
+               if (REG_SEEN(BPF_REG_X))
+                       /* lghi %bx,0 */
+                       EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
        }
-       /* BPF compatibility: clear A (%b0) and X (%b7) registers */
-       if (REG_SEEN(BPF_REG_A))
-               /* lghi %ba,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
-       if (REG_SEEN(BPF_REG_X))
-               /* lghi %bx,0 */
-               EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
 }
 
 /*
@@ -976,12 +978,19 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                REG_SET_SEEN(BPF_REG_5);
                jit->seen |= SEEN_FUNC;
                /* lg %w1,<d(imm)>(%l) */
-               EMIT6_DISP(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
-                          EMIT_CONST_U64(func));
+               EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
+                             EMIT_CONST_U64(func));
                /* basr %r14,%w1 */
                EMIT2(0x0d00, REG_14, REG_W1);
                /* lgr %b0,%r2: load return value into %b0 */
                EMIT4(0xb9040000, BPF_REG_0, REG_2);
+               if (bpf_helper_changes_skb_data((void *)func)) {
+                       jit->seen |= SEEN_SKB_CHANGE;
+                       /* lg %b1,ST_OFF_SKBP(%r15) */
+                       EMIT6_DISP_LH(0xe3000000, 0x0004, BPF_REG_1, REG_0,
+                                     REG_15, STK_OFF_SKBP);
+                       emit_load_skb_data_hlen(jit);
+               }
                break;
        }
        case BPF_JMP | BPF_CALL | BPF_X:
@@ -1236,7 +1245,7 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
        jit->lit = jit->lit_start;
        jit->prg = 0;
 
-       bpf_jit_prologue(jit);
+       bpf_jit_prologue(jit, bpf_prog_was_classic(fp));
        for (i = 0; i < fp->len; i += insn_count) {
                insn_count = bpf_jit_insn(jit, fp, i);
                if (insn_count < 0)
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 7931eeeb649af45af45aaa49a20fa727a6aecd40..f8b9f71b9a2b631816df61ff9b95657786e7cd51 100644 (file)
@@ -807,7 +807,7 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf];
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(flen, proglen, pass, image);
+               bpf_jit_dump(flen, proglen, pass + 1, image);
 
        if (image) {
                bpf_flush_icache(image, image + proglen);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index be2e7a2b10d7169b39b0849703b305ebb52c8933..ec5214f39aa802ed923d10315a1672547493262d 100644 (file)
@@ -315,6 +315,26 @@ static void emit_bpf_tail_call(u8 **pprog)
        *pprog = prog;
 }
 
+
+static void emit_load_skb_data_hlen(u8 **pprog)
+{
+       u8 *prog = *pprog;
+       int cnt = 0;
+
+       /* r9d = skb->len - skb->data_len (headlen)
+        * r10 = skb->data
+        */
+       /* mov %r9d, off32(%rdi) */
+       EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));
+
+       /* sub %r9d, off32(%rdi) */
+       EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));
+
+       /* mov %r10, off32(%rdi) */
+       EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
+       *pprog = prog;
+}
+
 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx)
 {
@@ -329,36 +349,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
        emit_prologue(&prog);
 
-       if (seen_ld_abs) {
-               /* r9d : skb->len - skb->data_len (headlen)
-                * r10 : skb->data
-                */
-               if (is_imm8(offsetof(struct sk_buff, len)))
-                       /* mov %r9d, off8(%rdi) */
-                       EMIT4(0x44, 0x8b, 0x4f,
-                             offsetof(struct sk_buff, len));
-               else
-                       /* mov %r9d, off32(%rdi) */
-                       EMIT3_off32(0x44, 0x8b, 0x8f,
-                                   offsetof(struct sk_buff, len));
-
-               if (is_imm8(offsetof(struct sk_buff, data_len)))
-                       /* sub %r9d, off8(%rdi) */
-                       EMIT4(0x44, 0x2b, 0x4f,
-                             offsetof(struct sk_buff, data_len));
-               else
-                       EMIT3_off32(0x44, 0x2b, 0x8f,
-                                   offsetof(struct sk_buff, data_len));
-
-               if (is_imm8(offsetof(struct sk_buff, data)))
-                       /* mov %r10, off8(%rdi) */
-                       EMIT4(0x4c, 0x8b, 0x57,
-                             offsetof(struct sk_buff, data));
-               else
-                       /* mov %r10, off32(%rdi) */
-                       EMIT3_off32(0x4c, 0x8b, 0x97,
-                                   offsetof(struct sk_buff, data));
-       }
+       if (seen_ld_abs)
+               emit_load_skb_data_hlen(&prog);
 
        for (i = 0; i < insn_cnt; i++, insn++) {
                const s32 imm32 = insn->imm;
@@ -367,6 +359,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                u8 b1 = 0, b2 = 0, b3 = 0;
                s64 jmp_offset;
                u8 jmp_cond;
+               bool reload_skb_data;
                int ilen;
                u8 *func;
 
@@ -818,12 +811,18 @@ xadd:                     if (is_imm8(insn->off))
                        func = (u8 *) __bpf_call_base + imm32;
                        jmp_offset = func - (image + addrs[i]);
                        if (seen_ld_abs) {
-                               EMIT2(0x41, 0x52); /* push %r10 */
-                               EMIT2(0x41, 0x51); /* push %r9 */
-                               /* need to adjust jmp offset, since
-                                * pop %r9, pop %r10 take 4 bytes after call insn
-                                */
-                               jmp_offset += 4;
+                               reload_skb_data = bpf_helper_changes_skb_data(func);
+                               if (reload_skb_data) {
+                                       EMIT1(0x57); /* push %rdi */
+                                       jmp_offset += 22; /* pop, mov, sub, mov */
+                               } else {
+                                       EMIT2(0x41, 0x52); /* push %r10 */
+                                       EMIT2(0x41, 0x51); /* push %r9 */
+                                       /* need to adjust jmp offset, since
+                                        * pop %r9, pop %r10 take 4 bytes after call insn
+                                        */
+                                       jmp_offset += 4;
+                               }
                        }
                        if (!imm32 || !is_simm32(jmp_offset)) {
                                pr_err("unsupported bpf func %d addr %p image %p\n",
@@ -832,8 +831,13 @@ xadd:                      if (is_imm8(insn->off))
                        }
                        EMIT1_off32(0xE8, jmp_offset);
                        if (seen_ld_abs) {
-                               EMIT2(0x41, 0x59); /* pop %r9 */
-                               EMIT2(0x41, 0x5A); /* pop %r10 */
+                               if (reload_skb_data) {
+                                       EMIT1(0x5F); /* pop %rdi */
+                                       emit_load_skb_data_hlen(&prog);
+                               } else {
+                                       EMIT2(0x41, 0x59); /* pop %r9 */
+                                       EMIT2(0x41, 0x5A); /* pop %r10 */
+                               }
                        }
                        break;
 
@@ -1099,7 +1103,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        }
 
        if (bpf_jit_enable > 1)
-               bpf_jit_dump(prog->len, proglen, 0, image);
+               bpf_jit_dump(prog->len, proglen, pass + 1, image);
 
        if (image) {
                bpf_flush_icache(header, image + proglen);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9635f1033ce5c46e7aba2863fa04a8bc86421aa9..8d973c4fc84e3185af77b6c4da0a64103be2e7b9 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 
 MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
 MODULE_LICENSE("GPL");
@@ -409,6 +410,17 @@ int bcma_bus_register(struct bcma_bus *bus)
                bcma_core_pci_early_init(&bus->drv_pci[0]);
        }
 
+       /* TODO: remove check for IS_BUILTIN(CONFIG_BCMA) check when
+        * of_default_bus_match_table is exported or in some other way
+        * accessible. This is just a temporary workaround.
+        */
+       if (IS_BUILTIN(CONFIG_BCMA) && bus->host_pdev) {
+               struct device *dev = &bus->host_pdev->dev;
+
+               of_platform_populate(dev->of_node, of_default_bus_match_table,
+                                    NULL, dev);
+       }
+
        /* Cores providing flash access go before SPROM init */
        list_for_each_entry(core, &bus->cores, list) {
                if (bcma_is_core_needed_early(core->id.id))
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 2e777071e1dcb8bd544a16aedd58ca28ad2f43bc..79e8234b1aa5995eaf8b8e25d729e6da193518fe 100644 (file)
@@ -132,6 +132,7 @@ config BT_HCIUART_3WIRE
 config BT_HCIUART_INTEL
        bool "Intel protocol support"
        depends on BT_HCIUART
+       select BT_HCIUART_H4
        select BT_INTEL
        help
          The Intel protocol support enables Bluetooth HCI over serial
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index fcfb72e9e0ee5948bf7e4c73e8d1b056d0723dbf..a5c4d0584389713522652c472cfd5c7b5e75dbab 100644 (file)
@@ -492,7 +492,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 7aab65427d388fc4c653223915ab033904032758..a00bb82eb7c6d8322f835c8ce5689eae784bc5f2 100644 (file)
@@ -427,7 +427,7 @@ static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_SCODATA_PKT:
                hdev->stat.sco_tx++;
                break;
-       };
+       }
 
        /* Prepend skb with frame type */
        memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ceb8ac68fdca2518c953cb6a9a36c6fd5b5f268..02ed816a18f9a2b652bbd0d179d150f50cad0294 100644 (file)
@@ -34,6 +34,7 @@
 
 #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
 #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
+#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
 
 int btbcm_check_bdaddr(struct hci_dev *hdev)
 {
@@ -66,9 +67,13 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
         *
         * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
         * with waiting for configuration state.
+        *
+        * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
+        * with waiting for configuration state.
         */
        if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
-           !bacmp(&bda->bdaddr, BDADDR_BCM4324B3)) {
+           !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
+           !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
                BT_INFO("%s: BCM: Using default device address (%pMR)",
                        hdev->name, &bda->bdaddr);
                set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -241,6 +246,7 @@ static const struct {
        u16 subver;
        const char *name;
 } bcm_uart_subver_table[] = {
+       { 0x4103, "BCM4330B1"   },      /* 002.001.003 */
        { 0x410e, "BCM43341B0"  },      /* 002.001.014 */
        { 0x4406, "BCM4324B3"   },      /* 002.004.006 */
        { 0x610c, "BCM4354"     },      /* 003.001.012 */
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 828f2f8d1568c8c50962dee7d8e77fcfcd669972..1ce4ac16c7facdd1387119619317ae8c3fcea5bc 100644 (file)
@@ -89,6 +89,86 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(btintel_set_bdaddr);
 
+void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+       struct sk_buff *skb;
+       u8 type = 0x00;
+
+       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reset after hardware error failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+       kfree_skb(skb);
+
+       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return;
+       }
+
+       if (skb->len != 13) {
+               BT_ERR("%s: Exception info size mismatch", hdev->name);
+               kfree_skb(skb);
+               return;
+       }
+
+       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
+
+       kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(btintel_hw_error);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+       const char *variant;
+
+       switch (ver->fw_variant) {
+       case 0x06:
+               variant = "Bootloader";
+               break;
+       case 0x23:
+               variant = "Firmware";
+               break;
+       default:
+               return;
+       }
+
+       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
+               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
+               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
+}
+EXPORT_SYMBOL_GPL(btintel_version_info);
+
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+                       const void *param)
+{
+       while (plen > 0) {
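+               /* The fragment type occupies one byte of the command
+                * parameters, leaving room for up to 252 payload bytes
+                * per HCI command (253 bytes total).
+                */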
+               struct sk_buff *skb;
+               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
+
+               cmd_param[0] = fragment_type;
+               memcpy(cmd_param + 1, param, fragment_len);
+
+               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
+                                    cmd_param, HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb))
+                       return PTR_ERR(skb);
+
+               kfree_skb(skb);
+
+               plen -= fragment_len;
+               param += fragment_len;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_secure_send);
+
 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
 MODULE_VERSION(VERSION);
index 4bda6ab34f60292da156b79314fcac9284aa4193..b278d14758d592896db033c78af53b6565a6c88f 100644 (file)
@@ -73,6 +73,11 @@ struct intel_secure_send_result {
 
 int btintel_check_bdaddr(struct hci_dev *hdev);
 int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+void btintel_hw_error(struct hci_dev *hdev, u8 code);
+
+void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
+                       const void *param);
 
 #else
 
@@ -86,4 +91,18 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
        return -EOPNOTSUPP;
 }
 
+static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
+{
+}
+
+static inline void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver)
+{
+}
+
+static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
+                                     u32 plen, const void *param)
+{
+       return -EOPNOTSUPP;
+}
+
 #endif
index 086f0ec89580627d4c95516c0ef912265445afef..27a9aac2558326c8f7801b891ef4b5f7441b1071 100644 (file)
@@ -95,10 +95,10 @@ struct btmrvl_private {
        struct btmrvl_device btmrvl_dev;
        struct btmrvl_adapter *adapter;
        struct btmrvl_thread main_thread;
-       int (*hw_host_to_card) (struct btmrvl_private *priv,
+       int (*hw_host_to_card)(struct btmrvl_private *priv,
                                u8 *payload, u16 nb);
-       int (*hw_wakeup_firmware) (struct btmrvl_private *priv);
-       int (*hw_process_int_status) (struct btmrvl_private *priv);
+       int (*hw_wakeup_firmware)(struct btmrvl_private *priv);
+       int (*hw_process_int_status)(struct btmrvl_private *priv);
        void (*firmware_dump)(struct btmrvl_private *priv);
        spinlock_t driver_lock;         /* spinlock used by driver */
 #ifdef CONFIG_DEBUG_FS
index b4cf8d9c9dac29893241cb9b814879969ed0906e..cc92b0f84a5168e139435737cef2c63ab1ee68e6 100644 (file)
@@ -68,6 +68,9 @@ static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth AMP device */
        { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
 
+       /* Generic Bluetooth USB interface */
+       { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) },
+
        /* Apple-specific (Broadcom) devices */
        { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
          .driver_info = BTUSB_BCM_APPLE },
@@ -1878,51 +1881,6 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
        return -EILSEQ;
 }
 
-static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type,
-                                  u32 plen, const void *param)
-{
-       while (plen > 0) {
-               struct sk_buff *skb;
-               u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen;
-
-               cmd_param[0] = fragment_type;
-               memcpy(cmd_param + 1, param, fragment_len);
-
-               skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1,
-                                    cmd_param, HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb))
-                       return PTR_ERR(skb);
-
-               kfree_skb(skb);
-
-               plen -= fragment_len;
-               param += fragment_len;
-       }
-
-       return 0;
-}
-
-static void btusb_intel_version_info(struct hci_dev *hdev,
-                                    struct intel_version *ver)
-{
-       const char *variant;
-
-       switch (ver->fw_variant) {
-       case 0x06:
-               variant = "Bootloader";
-               break;
-       case 0x23:
-               variant = "Firmware";
-               break;
-       default:
-               return;
-       }
-
-       BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name,
-               variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f,
-               ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy);
-}
-
 static int btusb_setup_intel_new(struct hci_dev *hdev)
 {
        static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
@@ -1984,7 +1942,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
                return -EINVAL;
        }
 
-       btusb_intel_version_info(hdev, ver);
+       btintel_version_info(hdev, ver);
 
        /* The firmware variant determines if the device is in bootloader
         * mode or is running operational firmware. The value 0x06 identifies
@@ -2104,7 +2062,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Start the firmware download transaction with the Init fragment
         * represented by the 128 bytes of CSS header.
         */
-       err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data);
+       err = btintel_secure_send(hdev, 0x00, 128, fw->data);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware header (%d)",
                       hdev->name, err);
@@ -2114,7 +2072,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Send the 256 bytes of public key information from the firmware
         * as the PKey fragment.
         */
-       err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware public key (%d)",
                       hdev->name, err);
@@ -2124,7 +2082,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
        /* Send the 256 bytes of signature information from the firmware
         * as the Sign fragment.
         */
-       err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
        if (err < 0) {
                BT_ERR("%s: Failed to send firmware signature (%d)",
                       hdev->name, err);
@@ -2148,8 +2106,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
                 * firmware data buffer as a single Data fragment.
                 */
                if (!(frag_len % 4)) {
-                       err = btusb_intel_secure_send(hdev, 0x01, frag_len,
-                                                     fw_ptr);
+                       err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
                        if (err < 0) {
                                BT_ERR("%s: Failed to send firmware data (%d)",
                                       hdev->name, err);
@@ -2291,39 +2248,6 @@ done:
        return 0;
 }
 
-static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code)
-{
-       struct sk_buff *skb;
-       u8 type = 0x00;
-
-       BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code);
-
-       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Reset after hardware error failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return;
-       }
-       kfree_skb(skb);
-
-       skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               BT_ERR("%s: Retrieving Intel exception info failed (%ld)",
-                      hdev->name, PTR_ERR(skb));
-               return;
-       }
-
-       if (skb->len != 13) {
-               BT_ERR("%s: Exception info size mismatch", hdev->name);
-               kfree_skb(skb);
-               return;
-       }
-
-       BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1));
-
-       kfree_skb(skb);
-}
-
 static int btusb_shutdown_intel(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -2783,7 +2707,7 @@ static int btusb_probe(struct usb_interface *intf,
        if (id->driver_info & BTUSB_INTEL_NEW) {
                hdev->send = btusb_send_frame_intel;
                hdev->setup = btusb_setup_intel_new;
-               hdev->hw_error = btusb_hw_error_intel;
+               hdev->hw_error = btintel_hw_error;
                hdev->set_bdaddr = btintel_set_bdaddr;
                set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
        }
index 78e10f0c65b28dc38d30336514b92614bc7e449c..84135c54ed2e46df5111e2c2797b82fa8db4b143 100644 (file)
@@ -182,9 +182,9 @@ static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb)
        int i;
 
        printk(KERN_INFO "Bluetooth: Nokia control data =");
-       for (i = 0; i < skb->len; i++) {
+       for (i = 0; i < skb->len; i++)
                printk(" %02x", skb->data[i]);
-       }
+
        printk("\n");
 
        /* transition to active state */
@@ -406,7 +406,7 @@ static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        default:
                return -EILSEQ;
-       };
+       }
 
        nsh.zero = 0;
        nsh.len = skb->len;
index 3455cecc9ecfe630c3331d67d22715ddb60cfcf8..b35b238a0380197fda6ee096e388a6fc09aa11a7 100644 (file)
@@ -75,7 +75,7 @@ struct h5 {
        size_t                  rx_pending;     /* Expecting more bytes */
        u8                      rx_ack;         /* Last ack number received */
 
-       int                     (*rx_func) (struct hci_uart *hu, u8 c);
+       int                     (*rx_func)(struct hci_uart *hu, u8 c);
 
        struct timer_list       timer;          /* Retransmission timer */
 
index 5dd07bf052360c15bc8631551def5e6cce687626..21dfa89751dfe6b790704d223d770d229ddf2c02 100644 (file)
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/wait.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
 #include "hci_uart.h"
+#include "btintel.h"
+
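+/* Bit numbers in intel_data->flags tracking bootloader and firmware state */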
+#define STATE_BOOTLOADER       0
+#define STATE_DOWNLOADING      1
+#define STATE_FIRMWARE_LOADED  2
+#define STATE_FIRMWARE_FAILED  3
+#define STATE_BOOTING          4
+
+struct intel_data {
+       struct sk_buff *rx_skb;
+       struct sk_buff_head txq;
+       unsigned long flags;
+};
+
+static int intel_open(struct hci_uart *hu)
+{
+       struct intel_data *intel;
+
+       BT_DBG("hu %p", hu);
+
+       intel = kzalloc(sizeof(*intel), GFP_KERNEL);
+       if (!intel)
+               return -ENOMEM;
+
+       skb_queue_head_init(&intel->txq);
+
+       hu->priv = intel;
+       return 0;
+}
+
+static int intel_close(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&intel->txq);
+       kfree_skb(intel->rx_skb);
+       kfree(intel);
+
+       hu->priv = NULL;
+       return 0;
+}
+
+static int intel_flush(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p", hu);
+
+       skb_queue_purge(&intel->txq);
+
+       return 0;
+}
+
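+/* Inject a synthetic Command Complete event so the HCI core's command
+ * flow control keeps advancing for vendor commands that never answer
+ * on their own.
+ */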
+static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
+{
+       struct sk_buff *skb;
+       struct hci_event_hdr *hdr;
+       struct hci_ev_cmd_complete *evt;
+
+       skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr));
+       hdr->evt = HCI_EV_CMD_COMPLETE;
+       hdr->plen = sizeof(*evt) + 1;
+
+       evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt));
+       evt->ncmd = 0x01;
+       evt->opcode = cpu_to_le16(opcode);
+
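+       /* Append a single parameter byte for a success status (0x00) */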
+       *skb_put(skb, 1) = 0x00;
+
+       bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
+
+       return hci_recv_frame(hdev, skb);
+}
+
+static int intel_setup(struct hci_uart *hu)
+{
+       static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01,
+                                         0x00, 0x08, 0x04, 0x00 };
+       struct intel_data *intel = hu->priv;
+       struct hci_dev *hdev = hu->hdev;
+       struct sk_buff *skb;
+       struct intel_version *ver;
+       struct intel_boot_params *params;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       char fwname[64];
+       u32 frag_len;
+       ktime_t calltime, delta, rettime;
+       unsigned long long duration;
+       int err;
+
+       BT_DBG("%s", hdev->name);
+
+       hu->hdev->set_bdaddr = btintel_set_bdaddr;
+
+       calltime = ktime_get();
+
+       set_bit(STATE_BOOTLOADER, &intel->flags);
+
+       /* Read the Intel version information to determine if the device
+        * is in bootloader mode or if it already has operational firmware
+        * loaded.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel version information failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*ver)) {
+               BT_ERR("%s: Intel version event size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       ver = (struct intel_version *)skb->data;
+       if (ver->status) {
+               BT_ERR("%s: Intel version command failure (%02x)",
+                      hdev->name, ver->status);
+               err = -bt_to_errno(ver->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       /* The hardware platform number has a fixed value of 0x37 and
+        * for now only this single value is accepted.
+        */
+       if (ver->hw_platform != 0x37) {
+               BT_ERR("%s: Unsupported Intel hardware platform (%u)",
+                      hdev->name, ver->hw_platform);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
+        * supported by this firmware loading method. This check has been
+        * put in place to ensure correct forward compatibility options
+        * when newer hardware variants come along.
+        */
+       if (ver->hw_variant != 0x0b) {
+               BT_ERR("%s: Unsupported Intel hardware variant (%u)",
+                      hdev->name, ver->hw_variant);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       btintel_version_info(hdev, ver);
+
+       /* The firmware variant determines if the device is in bootloader
+        * mode or is running operational firmware. The value 0x06 identifies
+        * the bootloader and the value 0x23 identifies the operational
+        * firmware.
+        *
+        * When the operational firmware is already present, only a check
+        * for a valid Bluetooth device address is needed. This determines
+        * whether the device will be added as a configured or unconfigured
+        * controller.
+        *
+        * It is not possible to use the Secure Boot Parameters in this
+        * case since that command is only available in bootloader mode.
+        */
+       if (ver->fw_variant == 0x23) {
+               kfree_skb(skb);
+               clear_bit(STATE_BOOTLOADER, &intel->flags);
+               btintel_check_bdaddr(hdev);
+               return 0;
+       }
+
+       /* If the device is not in bootloader mode, then the only possible
+        * choice is to return an error and abort the device initialization.
+        */
+       if (ver->fw_variant != 0x06) {
+               BT_ERR("%s: Unsupported Intel firmware variant (%u)",
+                      hdev->name, ver->fw_variant);
+               kfree_skb(skb);
+               return -ENODEV;
+       }
+
+       kfree_skb(skb);
+
+       /* Read the secure boot parameters to identify the operating
+        * details of the bootloader.
+        */
+       skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Reading Intel boot parameters failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*params)) {
+               BT_ERR("%s: Intel boot parameters size mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EILSEQ;
+       }
+
+       params = (struct intel_boot_params *)skb->data;
+       if (params->status) {
+               BT_ERR("%s: Intel boot parameters command failure (%02x)",
+                      hdev->name, params->status);
+               err = -bt_to_errno(params->status);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Device revision is %u", hdev->name,
+               le16_to_cpu(params->dev_revid));
+
+       BT_INFO("%s: Secure boot is %s", hdev->name,
+               params->secure_boot ? "enabled" : "disabled");
+
+       BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
+               params->min_fw_build_nn, params->min_fw_build_cw,
+               2000 + params->min_fw_build_yy);
+
+       /* It is required that every single firmware fragment is acknowledged
+        * with a command complete event. If the boot parameters indicate
+        * that this bootloader does not send them, then abort the setup.
+        */
+       if (params->limited_cce != 0x00) {
+               BT_ERR("%s: Unsupported Intel firmware loading method (%u)",
+                      hdev->name, params->limited_cce);
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       /* If the OTP has no valid Bluetooth device address, then there will
+        * also be no valid address for the operational firmware.
+        */
+       if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) {
+               BT_INFO("%s: No device address configured", hdev->name);
+               set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
+       }
+
+       /* With this Intel bootloader only the hardware variant and device
+        * revision information are used to select the right firmware.
+        *
+        * Currently this bootloader support is limited to hardware variant
+        * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b).
+        */
+       snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi",
+                le16_to_cpu(params->dev_revid));
+
+       err = request_firmware(&fw, fwname, &hdev->dev);
+       if (err < 0) {
+               BT_ERR("%s: Failed to load Intel firmware file (%d)",
+                      hdev->name, err);
+               kfree_skb(skb);
+               return err;
+       }
+
+       BT_INFO("%s: Found device firmware: %s", hdev->name, fwname);
+
+       kfree_skb(skb);
+
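+       /* The firmware image begins with a 644 byte security header: the
+        * 128 byte CSS header, the public key at offset 128 and the
+        * signature at offset 388; the downloadable commands start at
+        * offset 644.
+        */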
+       if (fw->size < 644) {
+               BT_ERR("%s: Invalid size of firmware file (%zu)",
+                      hdev->name, fw->size);
+               err = -EBADF;
+               goto done;
+       }
+
+       set_bit(STATE_DOWNLOADING, &intel->flags);
+
+       /* Start the firmware download transaction with the Init fragment
+        * represented by the 128 bytes of CSS header.
+        */
+       err = btintel_secure_send(hdev, 0x00, 128, fw->data);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware header (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of public key information from the firmware
+        * as the PKey fragment.
+        */
+       err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware public key (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       /* Send the 256 bytes of signature information from the firmware
+        * as the Sign fragment.
+        */
+       err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388);
+       if (err < 0) {
+               BT_ERR("%s: Failed to send firmware signature (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+
+       fw_ptr = fw->data + 644;
+       frag_len = 0;
+
+       while (fw_ptr - fw->data < fw->size) {
+               struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+
+               frag_len += sizeof(*cmd) + cmd->plen;
+
+               BT_DBG("%s: patching %td/%zu", hdev->name,
+                      (fw_ptr - fw->data), fw->size);
+
+               /* The parameter length of the secure send command requires
+                * a 4 byte alignment. Conveniently, the firmware file
+                * contains Intel_NOP commands to align the fragments as
+                * needed.
+                *
+                * Send sets of commands with 4 byte alignment from the
+                * firmware data buffer as a single Data fragment.
+                */
+               if (frag_len % 4)
+                       continue;
+
+               /* Send each command from the firmware data buffer as
+                * a single Data fragment.
+                */
+               err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr);
+               if (err < 0) {
+                       BT_ERR("%s: Failed to send firmware data (%d)",
+                              hdev->name, err);
+                       goto done;
+               }
+
+               fw_ptr += frag_len;
+               frag_len = 0;
+       }
+
+       set_bit(STATE_FIRMWARE_LOADED, &intel->flags);
+
+       BT_INFO("%s: Waiting for firmware download to complete", hdev->name);
+
+       /* Before switching the device into operational mode and with that
+        * booting the loaded firmware, wait for the bootloader notification
+        * that all fragments have been successfully received.
+        *
+        * When the event processing receives the notification, then the
+        * STATE_DOWNLOADING flag will be cleared.
+        *
+        * Firmware loading should not take longer than 5 seconds;
+        * if it does, time out and fail the setup of this device.
+        */
+       err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(5000));
+       if (err == 1) {
+               BT_ERR("%s: Firmware loading interrupted", hdev->name);
+               err = -EINTR;
+               goto done;
+       }
+
+       if (err) {
+               BT_ERR("%s: Firmware loading timeout", hdev->name);
+               err = -ETIMEDOUT;
+               goto done;
+       }
+
+       if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) {
+               BT_ERR("%s: Firmware loading failed", hdev->name);
+               err = -ENOEXEC;
+               goto done;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
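+       /* Shifting ns right by 10 approximates a division by 1000 (usecs) */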
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration);
+
+done:
+       release_firmware(fw);
+
+       if (err < 0)
+               return err;
+
+       calltime = ktime_get();
+
+       set_bit(STATE_BOOTING, &intel->flags);
+
+       skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       kfree_skb(skb);
+
+       /* The bootloader will not indicate when the device is ready;
+        * instead, the operational firmware signals readiness by sending
+        * a bootup notification.
+        *
+        * Booting into operational firmware should not take longer than
+        * 1 second; if it does, fail the setup since something went wrong.
+        */
+       BT_INFO("%s: Waiting for device to boot", hdev->name);
+
+       err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING,
+                                 TASK_INTERRUPTIBLE,
+                                 msecs_to_jiffies(1000));
+       if (err == 1) {
+               BT_ERR("%s: Device boot interrupted", hdev->name);
+               return -EINTR;
+       }
+
+       if (err) {
+               BT_ERR("%s: Device boot timeout", hdev->name);
+               return -ETIMEDOUT;
+       }
+
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+
+       BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration);
+
+       clear_bit(STATE_BOOTLOADER, &intel->flags);
+
+       return 0;
+}
+
+static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_uart *hu = hci_get_drvdata(hdev);
+       struct intel_data *intel = hu->priv;
+       struct hci_event_hdr *hdr;
+
+       if (!test_bit(STATE_BOOTLOADER, &intel->flags))
+               goto recv;
+
+       hdr = (void *)skb->data;
+
+       /* When firmware loading completes, the device sends out a
+        * vendor specific event indicating the result.
+        */
+       if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 &&
+           skb->data[2] == 0x06) {
+               if (skb->data[3] != 0x00)
+                       set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
+
+               if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
+                   test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_DOWNLOADING);
+               }
+
+       /* When switching to the operational firmware the device
+        * sends a vendor specific event indicating that the bootup
+        * completed.
+        */
+       } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
+                  skb->data[2] == 0x02) {
+               if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
+                       smp_mb__after_atomic();
+                       wake_up_bit(&intel->flags, STATE_BOOTING);
+               }
+       }
+recv:
+       return hci_recv_frame(hdev, skb);
+}
+
+static const struct h4_recv_pkt intel_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = intel_recv_event },
+};
+
+static int intel_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct intel_data *intel = hu->priv;
+
+       if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+               return -EUNATCH;
+
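+       /* Reassemble the H:4 byte stream into complete HCI frames */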
+       intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+                                   intel_recv_pkts,
+                                   ARRAY_SIZE(intel_recv_pkts));
+       if (IS_ERR(intel->rx_skb)) {
+               int err = PTR_ERR(intel->rx_skb);
+
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               intel->rx_skb = NULL;
+               return err;
+       }
+
+       return count;
+}
+
+static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+       struct intel_data *intel = hu->priv;
+
+       BT_DBG("hu %p skb %p", hu, skb);
+
+       skb_queue_tail(&intel->txq, skb);
+
+       return 0;
+}
+
+static struct sk_buff *intel_dequeue(struct hci_uart *hu)
+{
+       struct intel_data *intel = hu->priv;
+       struct sk_buff *skb;
+
+       skb = skb_dequeue(&intel->txq);
+       if (!skb)
+               return skb;
+
+       if (test_bit(STATE_BOOTLOADER, &intel->flags) &&
+           (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT)) {
+               struct hci_command_hdr *cmd = (void *)skb->data;
+               __u16 opcode = le16_to_cpu(cmd->opcode);
+
+               /* When the 0xfc01 command is issued to boot into
+                * the operational firmware, the controller does not
+                * send a command complete event. To keep command flow
+                * control working, inject that event here.
+                */
+               if (opcode == 0xfc01)
+                       inject_cmd_complete(hu->hdev, opcode);
+       }
+
+       /* Prepend skb with frame type */
+       memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);
+
+       return skb;
+}
+
+static const struct hci_uart_proto intel_proto = {
+       .id             = HCI_UART_INTEL,
+       .name           = "Intel",
+       .init_speed     = 115200,
+       .open           = intel_open,
+       .close          = intel_close,
+       .flush          = intel_flush,
+       .setup          = intel_setup,
+       .recv           = intel_recv,
+       .enqueue        = intel_enqueue,
+       .dequeue        = intel_dequeue,
+};
+
+int __init intel_init(void)
+{
+       return hci_uart_register_proto(&intel_proto);
+}
+
+int __exit intel_deinit(void)
+{
+       return hci_uart_unregister_proto(&intel_proto);
+}
index 177dd69fdd954151c3742c5cf443d439ad0edc0c..20c2ac193ff972a9ba8717092f285f81f9aaad59 100644 (file)
@@ -770,7 +770,7 @@ static int __init hci_uart_init(void)
 
        /* Register the tty discipline */
 
-       memset(&hci_uart_ldisc, 0, sizeof (hci_uart_ldisc));
+       memset(&hci_uart_ldisc, 0, sizeof(hci_uart_ldisc));
        hci_uart_ldisc.magic            = TTY_LDISC_MAGIC;
        hci_uart_ldisc.name             = "n_hci";
        hci_uart_ldisc.open             = hci_uart_tty_open;
@@ -804,6 +804,9 @@ static int __init hci_uart_init(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_init();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+       intel_init();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_init();
 #endif
@@ -830,6 +833,9 @@ static void __exit hci_uart_exit(void)
 #ifdef CONFIG_BT_HCIUART_3WIRE
        h5_deinit();
 #endif
+#ifdef CONFIG_BT_HCIUART_INTEL
+       intel_deinit();
+#endif
 #ifdef CONFIG_BT_HCIUART_BCM
        bcm_deinit();
 #endif
index ce9c670956f54d414aadaec5f9810fac7e058abd..496587a73a9daa4a2a70ef92bd9fc04b0ef72dbf 100644 (file)
@@ -167,6 +167,11 @@ int h5_init(void);
 int h5_deinit(void);
 #endif
 
+#ifdef CONFIG_BT_HCIUART_INTEL
+int intel_init(void);
+int intel_deinit(void);
+#endif
+
 #ifdef CONFIG_BT_HCIUART_BCM
 int bcm_init(void);
 int bcm_deinit(void);
index 36eb3d012b6d34ac96823cb193001afb42ae95ce..180a8f7ec82de80fdf69f4226d9bb0d8fff54052 100644 (file)
@@ -871,7 +871,7 @@ repoll:
                if (is_eth) {
                        wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
                        if (be32_to_cpu(cqe->vlan_my_qpn) &
-                                       MLX4_CQE_VLAN_PRESENT_MASK) {
+                                       MLX4_CQE_CVLAN_PRESENT_MASK) {
                                wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
                                        MLX4_CQE_VID_MASK;
                        } else {
index 7fde4d5c2b28beca9c645a498fb3aefc467fbe26..3c45358844eb94bc8d44c3934e0a5689b9adf994 100644 (file)
@@ -1870,8 +1870,6 @@ static void ad_marker_info_received(struct bond_marker *marker_info,
 static void ad_marker_response_received(struct bond_marker *marker,
                                        struct port *port)
 {
-       marker = NULL;
-       port = NULL;
        /* DO NOTHING, SINCE WE DECIDED NOT TO IMPLEMENT THIS FEATURE FOR NOW */
 }
 
index e1ccefce9a9de629505344f9fc22042ab09b9684..0c627b4733ca56b026e15bf9527330539fd64cd1 100644 (file)
@@ -3779,7 +3779,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
        struct slave *slave;
        struct list_head *iter;
        struct bond_up_slave *new_arr, *old_arr;
-       int slaves_in_agg;
        int agg_id = 0;
        int ret = 0;
 
@@ -3810,7 +3809,6 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
                        }
                        goto out;
                }
-               slaves_in_agg = ad_info.ports;
                agg_id = ad_info.aggregator_id;
        }
        bond_for_each_slave(bond, slave, iter) {
index 1bda29249d12254ddbdd2e35572597464a9380c2..db760e84119fcb970b7b34f7c4fac92b1acfed52 100644 (file)
@@ -111,6 +111,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
        [IFLA_BOND_AD_USER_PORT_KEY]    = { .type = NLA_U16 },
        [IFLA_BOND_AD_ACTOR_SYSTEM]     = { .type = NLA_BINARY,
                                            .len  = ETH_ALEN },
+       [IFLA_BOND_TLB_DYNAMIC_LB]      = { .type = NLA_U8 },
 };
 
 static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
@@ -405,7 +406,6 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
-
        if (data[IFLA_BOND_AD_USER_PORT_KEY]) {
                int port_key =
                        nla_get_u16(data[IFLA_BOND_AD_USER_PORT_KEY]);
@@ -415,7 +415,6 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
-
        if (data[IFLA_BOND_AD_ACTOR_SYSTEM]) {
                if (nla_len(data[IFLA_BOND_AD_ACTOR_SYSTEM]) != ETH_ALEN)
                        return -EINVAL;
@@ -426,6 +425,15 @@ static int bond_changelink(struct net_device *bond_dev,
                if (err)
                        return err;
        }
+       if (data[IFLA_BOND_TLB_DYNAMIC_LB]) {
+               int dynamic_lb = nla_get_u8(data[IFLA_BOND_TLB_DYNAMIC_LB]);
+
+               bond_opt_initval(&newval, dynamic_lb);
+               err = __bond_opt_set(bond, BOND_OPT_TLB_DYNAMIC_LB, &newval);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -476,6 +484,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_ACTOR_SYS_PRIO */
                nla_total_size(sizeof(u16)) + /* IFLA_BOND_AD_USER_PORT_KEY */
                nla_total_size(ETH_ALEN) + /* IFLA_BOND_AD_ACTOR_SYSTEM */
+               nla_total_size(sizeof(u8)) + /* IFLA_BOND_TLB_DYNAMIC_LB */
                0;
 }
 
@@ -598,6 +607,10 @@ static int bond_fill_info(struct sk_buff *skb,
                       bond->params.ad_select))
                goto nla_put_failure;
 
+       if (nla_put_u8(skb, IFLA_BOND_TLB_DYNAMIC_LB,
+                      bond->params.tlb_dynamic_lb))
+               goto nla_put_failure;
+
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                struct ad_info info;
 
index e9c624d54dd4cdf869d1cf04859dd0010c7c7f21..6dda57e2e724f575490248cb504120fe7e2ca600 100644 (file)
@@ -420,6 +420,13 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
                .flags = BOND_OPTFLAG_IFDOWN,
                .values = bond_ad_user_port_key_tbl,
                .set = bond_option_ad_user_port_key_set,
+       },
+       [BOND_OPT_NUM_PEER_NOTIF_ALIAS] = {
+               .id = BOND_OPT_NUM_PEER_NOTIF_ALIAS,
+               .name = "num_grat_arp",
+               .desc = "Number of peer notifications to send on failover event",
+               .values = bond_num_peer_notif_tbl,
+               .set = bond_option_num_peer_notif_set,
        }
 };
 
index 31835a4dab5784ed11984fadbacd7f5c76fbcc8b..f4ae720862158354d028169de77cafe782f539fa 100644 (file)
@@ -380,7 +380,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
 static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
                   bonding_show_ad_select, bonding_sysfs_store_option);
 
-/* Show and set the number of peer notifications to send after a failover event. */
+/* Show the number of peer notifications to send after a failover event. */
 static ssize_t bonding_show_num_peer_notif(struct device *d,
                                           struct device_attribute *attr,
                                           char *buf)
@@ -388,24 +388,10 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
        struct bonding *bond = to_bond(d);
        return sprintf(buf, "%d\n", bond->params.num_peer_notif);
 }
-
-static ssize_t bonding_store_num_peer_notif(struct device *d,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count)
-{
-       struct bonding *bond = to_bond(d);
-       int ret;
-
-       ret = bond_opt_tryset_rtnl(bond, BOND_OPT_NUM_PEER_NOTIF, (char *)buf);
-       if (!ret)
-               ret = count;
-
-       return ret;
-}
 static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
-                  bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+                  bonding_show_num_peer_notif, bonding_sysfs_store_option);
 static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
-                  bonding_show_num_peer_notif, bonding_store_num_peer_notif);
+                  bonding_show_num_peer_notif, bonding_sysfs_store_option);
 
 /* Show the MII monitor interval. */
 static ssize_t bonding_show_miimon(struct device *d,
index 7ad0a4d8e475f519b0f1a6618091c3c93cb60427..4c483d937481777025e4ab7e8d23ce75316d6f17 100644 (file)
@@ -46,13 +46,13 @@ config NET_DSA_MV88E6171
           ethernet switch chips.
 
 config NET_DSA_MV88E6352
-       tristate "Marvell 88E6172/88E6176/88E6352 ethernet switch chip support"
+       tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
        depends on NET_DSA
        select NET_DSA_MV88E6XXX
        select NET_DSA_TAG_EDSA
        ---help---
-         This enables support for the Marvell 88E6172, 88E6176 and 88E6352
-         ethernet switch chips.
+         This enables support for the Marvell 88E6172, 88E6176, 88E6320,
+         88E6321 and 88E6352 ethernet switch chips.
 
 config NET_DSA_BCM_SF2
        tristate "Broadcom Starfighter 2 Ethernet switch support"
index 079897b3a9554b55918c97a94f5ba718c314da38..289e20443d83a3507f2700afc91f395828f45149 100644 (file)
@@ -901,15 +901,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                                         struct fixed_phy_status *status)
 {
        struct bcm_sf2_priv *priv = ds_to_priv(ds);
-       u32 duplex, pause, speed;
+       u32 duplex, pause;
        u32 reg;
 
        duplex = core_readl(priv, CORE_DUPSTS);
        pause = core_readl(priv, CORE_PAUSESTS);
-       speed = core_readl(priv, CORE_SPDSTS);
-
-       speed >>= (port * SPDSTS_SHIFT);
-       speed &= SPDSTS_MASK;
 
        status->link = 0;
 
@@ -944,18 +940,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                reg &= ~LINK_STS;
        core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
 
-       switch (speed) {
-       case SPDSTS_10:
-               status->speed = SPEED_10;
-               break;
-       case SPDSTS_100:
-               status->speed = SPEED_100;
-               break;
-       case SPDSTS_1000:
-               status->speed = SPEED_1000;
-               break;
-       }
-
        if ((pause & (1 << port)) &&
            (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
                status->asym_pause = 1;
index 632815c10a401f7bd873e077a262528b73ceed7d..af210efecc554546a762073eb0121823b9337dcb 100644 (file)
@@ -36,6 +36,18 @@ static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
                        return "Marvell 88E6172";
                if ((ret & 0xfff0) == PORT_SWITCH_ID_6176)
                        return "Marvell 88E6176";
+               if (ret == PORT_SWITCH_ID_6320_A1)
+                       return "Marvell 88E6320 (A1)";
+               if (ret == PORT_SWITCH_ID_6320_A2)
+                       return "Marvell 88e6320 (A2)";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6320)
+                       return "Marvell 88E6320";
+               if (ret == PORT_SWITCH_ID_6321_A1)
+                       return "Marvell 88E6321 (A1)";
+               if (ret == PORT_SWITCH_ID_6321_A2)
+                       return "Marvell 88e6321 (A2)";
+               if ((ret & 0xfff0) == PORT_SWITCH_ID_6321)
+                       return "Marvell 88E6321";
                if (ret == PORT_SWITCH_ID_6352_A0)
                        return "Marvell 88E6352 (A0)";
                if (ret == PORT_SWITCH_ID_6352_A1)
@@ -80,66 +92,6 @@ static int mv88e6352_setup_global(struct dsa_switch *ds)
        return 0;
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e6352_get_temp(struct dsa_switch *ds, int *temp)
-{
-       int ret;
-
-       *temp = 0;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 27);
-       if (ret < 0)
-               return ret;
-
-       *temp = (ret & 0xff) - 25;
-
-       return 0;
-}
-
-static int mv88e6352_get_temp_limit(struct dsa_switch *ds, int *temp)
-{
-       int ret;
-
-       *temp = 0;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-
-       *temp = (((ret >> 8) & 0x1f) * 5) - 25;
-
-       return 0;
-}
-
-static int mv88e6352_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-       int ret;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       return mv88e6xxx_phy_page_write(ds, 0, 6, 26,
-                                       (ret & 0xe0ff) | (temp << 8));
-}
-
-static int mv88e6352_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
-{
-       int ret;
-
-       *alarm = false;
-
-       ret = mv88e6xxx_phy_page_read(ds, 0, 6, 26);
-       if (ret < 0)
-               return ret;
-
-       *alarm = !!(ret & 0x40);
-
-       return 0;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 static int mv88e6352_setup(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -377,10 +329,10 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .set_eee                = mv88e6xxx_set_eee,
        .get_eee                = mv88e6xxx_get_eee,
 #ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6352_get_temp,
-       .get_temp_limit         = mv88e6352_get_temp_limit,
-       .set_temp_limit         = mv88e6352_set_temp_limit,
-       .get_temp_alarm         = mv88e6352_get_temp_alarm,
+       .get_temp               = mv88e6xxx_get_temp,
+       .get_temp_limit         = mv88e6xxx_get_temp_limit,
+       .set_temp_limit         = mv88e6xxx_set_temp_limit,
+       .get_temp_alarm         = mv88e6xxx_get_temp_alarm,
 #endif
        .get_eeprom             = mv88e6352_get_eeprom,
        .set_eeprom             = mv88e6352_set_eeprom,
@@ -394,5 +346,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = {
        .fdb_getnext            = mv88e6xxx_port_fdb_getnext,
 };
 
-MODULE_ALIAS("platform:mv88e6352");
 MODULE_ALIAS("platform:mv88e6172");
+MODULE_ALIAS("platform:mv88e6176");
+MODULE_ALIAS("platform:mv88e6320");
+MODULE_ALIAS("platform:mv88e6321");
+MODULE_ALIAS("platform:mv88e6352");
index 561342466076c57888bcd92b4f463756f4d10a49..61ce4cf120a687e2473f35c29f3e41839b9f70f2 100644 (file)
@@ -517,6 +517,18 @@ static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
        return false;
 }
 
+static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+       switch (ps->id) {
+       case PORT_SWITCH_ID_6320:
+       case PORT_SWITCH_ID_6321:
+               return true;
+       }
+       return false;
+}
+
 static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
@@ -565,7 +577,7 @@ static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
 {
        int ret;
 
-       if (mv88e6xxx_6352_family(ds))
+       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
                port = (port + 1) << 5;
 
        /* Snapshot the hardware statistics counters for this port. */
@@ -796,54 +808,6 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
        }
 }
 
-#ifdef CONFIG_NET_DSA_HWMON
-
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int ret;
-       int val;
-
-       *temp = 0;
-
-       mutex_lock(&ps->smi_mutex);
-
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
-       if (ret < 0)
-               goto error;
-
-       /* Enable temperature sensor */
-       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-       if (ret < 0)
-               goto error;
-
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
-       if (ret < 0)
-               goto error;
-
-       /* Wait for temperature to stabilize */
-       usleep_range(10000, 12000);
-
-       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
-       if (val < 0) {
-               ret = val;
-               goto error;
-       }
-
-       /* Disable temperature sensor */
-       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
-       if (ret < 0)
-               goto error;
-
-       *temp = ((val & 0x1f) - 5) * 5;
-
-error:
-       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
-       mutex_unlock(&ps->smi_mutex);
-       return ret;
-}
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* Must be called with SMI lock held */
 static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
                           u16 mask)
@@ -1377,7 +1341,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
            mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
-           mv88e6xxx_6065_family(ds)) {
+           mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
                /* MAC Forcing register: don't force link, speed,
                 * duplex or flow control state to any particular
                 * values on physical ports, but force the CPU port
@@ -1423,7 +1387,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
            mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-           mv88e6xxx_6185_family(ds))
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
                reg = PORT_CONTROL_IGMP_MLD_SNOOP |
                PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
                PORT_CONTROL_STATE_FORWARDING;
@@ -1431,7 +1395,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
                        reg |= PORT_CONTROL_DSA_TAG;
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+                   mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+                   mv88e6xxx_6320_family(ds)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
                        else
@@ -1441,14 +1406,15 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
                    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
                    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
-                   mv88e6xxx_6185_family(ds)) {
+                   mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
                        if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
                                reg |= PORT_CONTROL_EGRESS_ADD_TAG;
                }
        }
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds)) {
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                if (ds->dsa_port_mask & (1 << port))
                        reg |= PORT_CONTROL_FRAME_MODE_DSA;
                if (port == dsa_upstream_port(ds))
@@ -1473,11 +1439,11 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
        reg = 0;
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6095_family(ds))
+           mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
                reg = PORT_CONTROL_2_MAP_DA;
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds))
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
                reg |= PORT_CONTROL_2_JUMBO_10240;
 
        if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
@@ -1514,7 +1480,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
                goto abort;
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Do not limit the period of time that this port can
                 * be paused for by the remote end or the period of
                 * time that this port can pause the remote end.
@@ -1564,7 +1531,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Rate Control: disable ingress rate limiting. */
                ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
                                           PORT_RATE_CONTROL, 0x0001);
@@ -1976,7 +1944,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
                          (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
-           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds)) {
+           mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Send all frames with destination addresses matching
                 * 01:80:c2:00:00:2x to the CPU port.
                 */
@@ -1995,7 +1964,8 @@ int mv88e6xxx_setup_global(struct dsa_switch *ds)
 
        if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
            mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
-           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds)) {
+           mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
+           mv88e6xxx_6320_family(ds)) {
                /* Disable ingress rate limiting by resetting all
                 * ingress rate limit registers to their initial
                 * state.
@@ -2162,6 +2132,132 @@ mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
        return ret;
 }
 
+#ifdef CONFIG_NET_DSA_HWMON
+
+static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
+{
+       struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+       int ret;
+       int val;
+
+       *temp = 0;
+
+       mutex_lock(&ps->smi_mutex);
+
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+       if (ret < 0)
+               goto error;
+
+       /* Enable temperature sensor */
+       ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       if (ret < 0)
+               goto error;
+
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+       if (ret < 0)
+               goto error;
+
+       /* Wait for temperature to stabilize */
+       usleep_range(10000, 12000);
+
+       val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+       if (val < 0) {
+               ret = val;
+               goto error;
+       }
+
+       /* Disable temperature sensor */
+       ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+       if (ret < 0)
+               goto error;
+
+       *temp = ((val & 0x1f) - 5) * 5;
+
+error:
+       _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+       mutex_unlock(&ps->smi_mutex);
+       return ret;
+}
+
+static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
+{
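+       /* On the 6320 family the temperature sensor is accessed via PHY 3;
+        * the 6352 family uses PHY 0.
+        */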
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       *temp = 0;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
+       if (ret < 0)
+               return ret;
+
+       *temp = (ret & 0xff) - 25;
+
+       return 0;
+}
+
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+{
+       if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+               return mv88e63xx_get_temp(ds, temp);
+
+       return mv88e61xx_get_temp(ds, temp);
+}
+
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       *temp = 0;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+
+       *temp = (((ret >> 8) & 0x1f) * 5) - 25;
+
+       return 0;
+}
+
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+       return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
+                                       (ret & 0xe0ff) | (temp << 8));
+}
+
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+{
+       int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+       int ret;
+
+       if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+               return -EOPNOTSUPP;
+
+       *alarm = false;
+
+       ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
+       if (ret < 0)
+               return ret;
+
+       *alarm = !!(ret & 0x40);
+
+       return 0;
+}
+#endif /* CONFIG_NET_DSA_HWMON */
+
 static int __init mv88e6xxx_init(void)
 {
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
index a650b2656de9f0c4b8181bc3d3b3a4c533b4aa7a..78e37226a37d2d90fd8c8924d484c057a7700d4a 100644 (file)
 #define PORT_SWITCH_ID_6182    0x1a60
 #define PORT_SWITCH_ID_6185    0x1a70
 #define PORT_SWITCH_ID_6240    0x2400
-#define PORT_SWITCH_ID_6320    0x1250
+#define PORT_SWITCH_ID_6320    0x1150
+#define PORT_SWITCH_ID_6320_A1 0x1151
+#define PORT_SWITCH_ID_6320_A2 0x1152
+#define PORT_SWITCH_ID_6321    0x3100
+#define PORT_SWITCH_ID_6321_A1 0x3101
+#define PORT_SWITCH_ID_6321_A2 0x3102
 #define PORT_SWITCH_ID_6350    0x3710
 #define PORT_SWITCH_ID_6351    0x3750
 #define PORT_SWITCH_ID_6352    0x3520
@@ -389,7 +394,10 @@ int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
 int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
 void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
                        struct ethtool_regs *regs, void *_p);
-int  mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
+int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
+int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
 int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
 int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
 int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
@@ -410,6 +418,7 @@ int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
 int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
 int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
                             int reg, int val);
+
 extern struct dsa_switch_driver mv88e6131_switch_driver;
 extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
 extern struct dsa_switch_driver mv88e6352_switch_driver;
index f3bb1784066baf51c97f464a1677aac50f781cdb..05aa7597dab9b712de711f61c2d14e8bd0e992e6 100644 (file)
@@ -167,6 +167,7 @@ source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
+source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
index c51014b0464f604c0f41118d8dff625e4993aa12..f42177b1172313a3444e17a420cbbdd1f1d74942 100644 (file)
@@ -77,6 +77,7 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
+obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
index 4566cdf0bc398e977b310729a3ab129a190bc9b1..b9a5a97ed4dd4abc77c488cabdd0a47e99f0693f 100644 (file)
@@ -933,6 +933,21 @@ static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcm_sysport_poll_controller(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       disable_irq(priv->irq0);
+       bcm_sysport_rx_isr(priv->irq0, priv);
+       enable_irq(priv->irq0);
+
+       disable_irq(priv->irq1);
+       bcm_sysport_tx_isr(priv->irq1, priv);
+       enable_irq(priv->irq1);
+}
+#endif
+
 static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
                                              struct net_device *dev)
 {
@@ -1723,6 +1738,9 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
        .ndo_set_features       = bcm_sysport_set_features,
        .ndo_set_rx_mode        = bcm_sysport_set_rx_mode,
        .ndo_set_mac_address    = bcm_sysport_change_mac,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bcm_sysport_poll_controller,
+#endif
 };
 
 #define REV_FMT        "v%2x.%02x"
index cd4ae76bbff2f8acda89154e65cf699e141553e5..5762c485ea06e75305a88784e0b34816680e164e 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x.h: Broadcom Everest network driver.
+/* bnx2x.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -30,7 +32,7 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.710.51-0"
+#define DRV_MODULE_VERSION      "1.712.30-0"
 #define DRV_MODULE_RELDATE      "2014/02/10"
 #define BNX2X_BC_VER            0x040200
 
@@ -1226,6 +1228,10 @@ struct bnx2x_slowpath {
                struct eth_classify_rules_ramrod_data   e2;
        } mac_rdata;
 
+       union {
+               struct eth_classify_rules_ramrod_data   e2;
+       } vlan_rdata;
+
        union {
                struct tstorm_eth_mac_filter_config     e1x;
                struct eth_filter_rules_ramrod_data     e2;
@@ -1408,6 +1414,9 @@ struct bnx2x_sp_objs {
 
        /* Queue State object */
        struct bnx2x_queue_sp_obj q_obj;
+
+       /* VLANs object */
+       struct bnx2x_vlan_mac_obj vlan_obj;
 };
 
 struct bnx2x_fp_stats {
@@ -1422,6 +1431,13 @@ enum {
        SUB_MF_MODE_UNKNOWN = 0,
        SUB_MF_MODE_UFP,
        SUB_MF_MODE_NPAR1_DOT_5,
+       SUB_MF_MODE_BD,
+};
+
+struct bnx2x_vlan_entry {
+       struct list_head link;
+       u16 vid;
+       bool hw;
 };
 
 struct bnx2x {
@@ -1636,6 +1652,8 @@ struct bnx2x {
        u8                      mf_sub_mode;
 #define IS_MF_UFP(bp)          (IS_MF_SD(bp) && \
                                 bp->mf_sub_mode == SUB_MF_MODE_UFP)
+#define IS_MF_BD(bp)           (IS_MF_SD(bp) && \
+                                bp->mf_sub_mode == SUB_MF_MODE_BD)
 
        u8                      wol;
 
@@ -1860,8 +1878,6 @@ struct bnx2x {
        int                                     dcb_version;
 
        /* CAM credit pools */
-
-       /* used only in sriov */
        struct bnx2x_credit_pool_obj            vlans_pool;
 
        struct bnx2x_credit_pool_obj            macs_pool;
@@ -1924,6 +1940,11 @@ struct bnx2x {
        u16 rx_filter;
 
        struct bnx2x_link_report_data           vf_link_vars;
+       struct list_head vlan_reg;
+       u16 vlan_cnt;
+       u16 vlan_credit;
+       u16 vxlan_dst_port;
+       bool accept_any_vlan;
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1951,23 +1972,14 @@ extern int num_queues;
 #define RSS_IPV6_TCP_CAP_MASK                                          \
        TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
 
-/* func init flags */
-#define FUNC_FLG_RSS           0x0001
-#define FUNC_FLG_STATS         0x0002
-/* removed  FUNC_FLG_UNMATCHED 0x0004 */
-#define FUNC_FLG_TPA           0x0008
-#define FUNC_FLG_SPQ           0x0010
-#define FUNC_FLG_LEADING       0x0020  /* PF only */
-#define FUNC_FLG_LEADING_STATS 0x0040
 struct bnx2x_func_init_params {
        /* dma */
-       dma_addr_t      fw_stat_map;    /* valid iff FUNC_FLG_STATS */
-       dma_addr_t      spq_map;        /* valid iff FUNC_FLG_SPQ */
+       bool            spq_active;
+       dma_addr_t      spq_map;
+       u16             spq_prod;
 
-       u16             func_flgs;
        u16             func_id;        /* abs fid */
        u16             pf_id;
-       u16             spq_prod;       /* valid iff FUNC_FLG_SPQ */
 };
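
With the FUNC_FLG_* bitmask gone, callers state intent directly instead of
pairing a flag with a "valid iff" field. A sketch of how a caller might now
fill the structure (field values are placeholders, not taken from this patch):

	struct bnx2x_func_init_params func_init = {
		.spq_active = true,			/* replaces FUNC_FLG_SPQ */
		.spq_map    = bnx2x_sp_mapping(bp, spq),
		.spq_prod   = bp->spq_prod_idx,
		.func_id    = BP_FUNC(bp),
		.pf_id      = BP_FUNC(bp),
	};
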
 
 #define for_each_cnic_queue(bp, var) \
@@ -2077,6 +2089,11 @@ struct bnx2x_func_init_params {
 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
                      struct bnx2x_vlan_mac_obj *obj, bool set,
                      int mac_type, unsigned long *ramrod_flags);
+
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+                      struct bnx2x_vlan_mac_obj *obj, bool set,
+                      unsigned long *ramrod_flags);
+
 /**
  * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
  *
@@ -2481,6 +2498,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define VF_ACQUIRE_THRESH              3
 #define VF_ACQUIRE_MAC_FILTERS         1
 #define VF_ACQUIRE_MC_FILTERS          10
+#define VF_ACQUIRE_VLAN_FILTERS                2 /* VLAN0 + 'real' VLAN */
 
 #define GOOD_ME_REG(me_reg) (((me_reg) & ME_REG_VF_VALID) && \
                            (!((me_reg) & ME_REG_VF_ERR)))
@@ -2577,6 +2595,8 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
 
 void bnx2x_update_mng_version(struct bnx2x *bp);
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp);
+
 #define MCPR_SCRATCH_BASE(bp) \
        (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
 
@@ -2589,4 +2609,9 @@ void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
 #define BNX2X_MAX_PHC_DRIFT 31000000
 #define BNX2X_PTP_TX_TIMEOUT
 
+/* Re-configure all previously configured vlan filters.
+ * Meant for implicit re-load flows.
+ */
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp);
+
 #endif /* bnx2x.h */
index a90d7364334f9dfa3687dc813e068508a342861c..1637de6caf46b5213e3148d5d3e44309e1f1483d 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -2103,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                if (rss_obj->udp_rss_v6)
                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
-               if (!CHIP_IS_E1x(bp))
+               if (!CHIP_IS_E1x(bp)) {
+                       /* valid only for TUNN_MODE_VXLAN tunnel mode */
+                       __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
                        /* valid only for TUNN_MODE_GRE tunnel mode */
-                       __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+               }
        } else {
                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
        }
@@ -2510,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
                fp->mode = TPA_MODE_DISABLED;
 }
 
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+       u32 cur;
+
+       if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+               return;
+
+       cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+       DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+          cur, state);
+
+       SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
 int bnx2x_load_cnic(struct bnx2x *bp)
 {
        int i, rc, port = BP_PORT(bp);
@@ -2827,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        /* Start fast path */
 
+       /* Re-configure vlan filters */
+       rc = bnx2x_vlan_reconfigure_vid(bp);
+       if (rc)
+               LOAD_ERROR_EXIT(bp, load_error3);
+
        /* Initialize Rx filter. */
        bnx2x_set_rx_mode_inner(bp);
 
@@ -2873,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                /* mark driver is loaded in shmem2 */
                u32 val;
                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+               val &= ~DRV_FLAGS_MTU_MASK;
+               val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
@@ -2885,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                return -EBUSY;
        }
 
+       /* Update driver data for On-Chip MFW dump. */
+       if (IS_PF(bp))
+               bnx2x_update_mfw_dump(bp);
+
        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
                bnx2x_dcbx_init(bp, false);
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
 
        return 0;
@@ -2956,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
        /* mark driver is unloaded in shmem2 */
        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                u32 val;
@@ -3677,7 +3715,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
                pbd2->fw_ip_hdr_to_payload_w =
                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
                pbd_e2->data.tunnel_data.flags |=
-                       ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+                       ETH_TUNNEL_DATA_IPV6_OUTER;
        }
 
        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4184,6 +4222,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+       int mfw_vn = BP_FW_MB_IDX(bp);
+       u32 tmp;
+
+       /* If the shmem shouldn't affect configuration, fall back to an identity map */
+       if (!IS_MF_BD(bp)) {
+               int i;
+
+               for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+                       c2s_map[i] = i;
+               *c2s_default = 0;
+
+               return;
+       }
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[0] = tmp & 0xff;
+       c2s_map[1] = (tmp >> 8) & 0xff;
+       c2s_map[2] = (tmp >> 16) & 0xff;
+       c2s_map[3] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[4] = tmp & 0xff;
+       c2s_map[5] = (tmp >> 8) & 0xff;
+       c2s_map[6] = (tmp >> 16) & 0xff;
+       c2s_map[7] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
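
Each 32-bit shmem word packs four one-byte map entries, so the unpacking is
a plain shift-and-mask. A standalone illustration with a sample value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t lower = 0x03020100;	/* sample word after the byte swap */
		uint8_t c2s_map[4];
		int i;

		for (i = 0; i < 4; i++)
			c2s_map[i] = (lower >> (8 * i)) & 0xff;

		for (i = 0; i < 4; i++)	/* prints 0->0, 1->1, 2->2, 3->3 */
			printf("inner PCP %d -> outer PCP %d\n", i, c2s_map[i]);
		return 0;
	}
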
+
 /**
  * bnx2x_setup_tc - routine to configure net_device for multi tc
  *
@@ -4194,8 +4267,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 {
-       int cos, prio, count, offset;
        struct bnx2x *bp = netdev_priv(dev);
+       u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+       int cos, prio, count, offset;
 
        /* setup tc must be called under rtnl lock */
        ASSERT_RTNL();
@@ -4219,12 +4293,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
                return -EINVAL;
        }
 
+       bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
        /* configure priority to traffic class mapping */
        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+               int outer_prio = c2s_map[prio];
+
+               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                   "mapping priority %d to tc %d\n",
-                  prio, bp->prio_to_cos[prio]);
+                  outer_prio, bp->prio_to_cos[outer_prio]);
        }
 
        /* Use this configuration to differentiate tc0 from other COSes
@@ -4278,6 +4356,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        if (netif_running(dev))
                rc = bnx2x_set_eth_mac(bp, true);
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return rc;
 }
 
@@ -4831,6 +4912,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
         */
        dev->mtu = new_mtu;
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return bnx2x_reload_if_running(dev);
 }
 
index 03b7404d5b9ba59c5470fe36ec0746d6b75f7eee..fa7c532012654eb05ad42fce20e92c6ab03cef32 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.h: Broadcom Everest network driver.
+/* bnx2x_cmn.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -620,6 +622,14 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
  */
 void bnx2x_tx_timeout(struct net_device *dev);
 
+/**
+ * bnx2x_get_c2s_mapping - read inner-to-outer vlan configuration
+ *
+ * @bp:                        driver handle
+ * @c2s_map:           should have BNX2X_MAX_PRIORITY entries for mapping
+ * @c2s_default:       entry for non-tagged configuration
+ */
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
+
 /*********************** Inlines **********************************/
 /*********************** Fast path ********************************/
 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
@@ -931,14 +941,33 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
        start_params->mf_mode = bp->mf_mode;
        start_params->sd_vlan_tag = bp->mf_ov;
 
+       /* Configure Ethertype for BD mode */
+       if (IS_MF_BD(bp)) {
+               DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
+               start_params->sd_vlan_eth_type = ETH_P_8021AD;
+               REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
+               REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
+               REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);
+
+               bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
+                                     &start_params->c2s_pri_default);
+               start_params->c2s_pri_valid = 1;
+
+               DP(NETIF_MSG_IFUP,
+                  "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
+                  start_params->c2s_pri[0], start_params->c2s_pri[1],
+                  start_params->c2s_pri[2], start_params->c2s_pri[3],
+                  start_params->c2s_pri[4], start_params->c2s_pri[5],
+                  start_params->c2s_pri[6], start_params->c2s_pri[7],
+                  start_params->c2s_pri_default);
+       }
+
        if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
 
-       start_params->tunnel_mode       = TUNN_MODE_GRE;
-       start_params->gre_tunnel_type   = IPGRE_TUNNEL;
-       start_params->inner_gre_rss_en  = 1;
+       start_params->inner_rss = 1;
 
        if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
                start_params->class_fail_ethtype = ETH_P_FIP;
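
The 0x88a8 value programmed above is the IEEE 802.1ad (QinQ) ethertype: in
BD mode the outer, service-delimiting tag is an S-tag rather than a plain
802.1Q C-tag, and on the wire a double-tagged frame carries the S-tag first.
A reference sketch of the constants and layout:

	#include <stdint.h>

	#define ETH_P_8021Q	0x8100	/* C-tag (customer / inner) */
	#define ETH_P_8021AD	0x88A8	/* S-tag (service / outer)  */

	/* double-tagged frame, illustrative:
	 *   dst[6] src[6] | 0x88A8 S-TCI | 0x8100 C-TCI | ethertype | payload
	 */
	struct vlan_tag {
		uint16_t tpid;	/* 0x88A8 or 0x8100, network byte order */
		uint16_t tci;	/* PCP(3) | DEI(1) | VID(12) */
	};
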
@@ -1037,6 +1066,15 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
                           BNX2X_FILTER_MAC_PENDING,
                           &bp->sp_state, obj_type,
                           &bp->macs_pool);
+
+       if (!CHIP_IS_E1x(bp))
+               bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
+                                   fp->cl_id, fp->cid, BP_FUNC(bp),
+                                   bnx2x_sp(bp, vlan_rdata),
+                                   bnx2x_sp_mapping(bp, vlan_rdata),
+                                   BNX2X_FILTER_VLAN_PENDING,
+                                   &bp->sp_state, obj_type,
+                                   &bp->vlans_pool);
 }
 
 /**
@@ -1096,7 +1134,7 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
        bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
                                   bnx2x_get_path_func_num(bp));
 
-       bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_ABS_FUNC(bp)>>1,
+       bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
                                    bnx2x_get_path_func_num(bp));
 
        /* RSS configuration object */
@@ -1106,6 +1144,8 @@ static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
                                  bnx2x_sp_mapping(bp, rss_rdata),
                                  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
                                  BNX2X_OBJ_TYPE_RX);
+
+       bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
 }
 
 static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
@@ -1339,4 +1379,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp);
 void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
                            u32 verbose);
 
+/**
+ * bnx2x_set_os_driver_state - write driver state for management FW usage
+ *
+ * @bp:                driver handle
+ * @state:     OS_DRIVER_STATE_* value reflecting current driver state
+ */
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);
 #endif /* BNX2X_CMN_H */
index 6e4294ed1fc997e5545fcc48a24c92124129867c..7ccf6684e0a32d3b4a6caa78fa1fbff8dbbef460 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.c: Broadcom Everest network driver.
+/* bnx2x_dcb.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -1850,6 +1852,8 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
                        if (bp->dcbx_port_params.ets.cos_params[cos].
                                                pri_bitmask & pri_bit)
                                        tt2cos[pri].cos = cos;
+
+               pfc_fw_cfg->dcb_outer_pri[pri]  = ttp[pri];
        }
 
        /* we never want the FW to add a 0 vlan tag */
index c6939ecb02c572fd41fa5d2c814ed5ad932c26e7..9a9517c0f703f756beb7df5522250f9b802c3ad1 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dcb.h: Broadcom Everest network driver.
+/* bnx2x_dcb.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
index 741aa130c19f4efb4df2e2b64b166f24bd4b6b3e..eccfa13b0f2d5c43132e581ac15c32f752903404 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_dump.h: Broadcom Everest network driver.
+/* bnx2x_dump.h: QLogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  */
 
index 76b9052a961c517978494199d74398264583508c..6b2050a198df8ebd43fb29ec4176424491f29ac8 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_ethtool.c: Broadcom Everest network driver.
+/* bnx2x_ethtool.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1129,6 +1131,9 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        } else
                bp->wol = 0;
 
+       if (SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return 0;
 }
 
@@ -3562,17 +3567,8 @@ static int bnx2x_get_ts_info(struct net_device *dev,
 
                info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                                   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                                   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                                  (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+                                  (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
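
Advertising only the *_EVENT filters is consistent with hardware that
timestamps every PTP event message: requests for the narrower SYNC or
DELAY_REQ filters may be coerced to the matching event filter, which the
timestamping ABI treats as an acceptable upgrade. A fragment patterned on
typical hwtstamp ioctl handling (not copied from this patch):

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		/* timestamp all V2/L4 event messages */
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	default:
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	}
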
 
index 7636e3c18771dced3a1d4bd3db0018cee4b4d7f3..226ab29f4cb6a4d10a9d41b501701bf953040c17 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+/* bnx2x_fw_defs.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define MAX_COS_NUMBER 4
 #define MAX_TRAFFIC_TYPES 8
 #define MAX_PFC_PRIORITIES 8
-
+#define MAX_VLAN_PRIORITIES 8
        /* used by array traffic_type_to_priority[] to mark traffic type
         * that is not mapped to priority
         */
 #define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
index 8aafd9b5d6a2b107f67a8cce344b4bb479ccc85c..9e3b5a1e9f4f490bec6f74d1cb1f5c949289d67d 100644 (file)
@@ -1,6 +1,8 @@
 /* bnx2x_fw_file_hdr.h: FW binary file header structure.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 058bc73282201e8b9f897273c18ac18856eac38e..08a08fa49caad3fb8850b0f92f4285c43a667835 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_hsi.h: Broadcom Everest network driver.
+/* bnx2x_hsi.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -729,6 +731,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54616      0x00001000
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84834      0x00001100
+               #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84858      0x00001200
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
 
@@ -786,6 +789,7 @@ struct port_hw_cfg {                    /* port 0: 0x12c  port 1: 0x2bc */
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616       0x00001000
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834       0x00001100
+               #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858       0x00001200
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
                #define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
@@ -864,6 +868,7 @@ struct shared_feat_cfg {             /* NVRAM Offset */
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE      0x00000400
+               #define SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE        0x00000500
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE       0x00000600
                #define SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE  0x00000700
 
@@ -2064,6 +2069,26 @@ struct ncsi_oem_fcoe_features {
        #define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
 };
 
+enum curr_cfg_method_e {
+       CURR_CFG_MET_NONE = 0,  /* default config */
+       CURR_CFG_MET_OS = 1,
+       CURR_CFG_MET_VENDOR_SPEC = 2, /* e.g. Option ROM, NPAR, O/S Cfg Utils */
+};
+
+struct mdump_driver_info {
+       u32 epoc;       /* driver load time, seconds since the epoch */
+       u32 drv_ver;
+       u32 fw_ver;
+
+       u32 valid_dump;
+       #define FIRST_DUMP_VALID        (1 << 0)
+       #define SECOND_DUMP_VALID       (1 << 1)
+
+       u32 flags;
+       #define ENABLE_ALL_TRIGGERS     (0x7fffffff)
+       #define TRIGGER_MDUMP_ONCE      (1 << 31)
+};
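
A sketch of how a helper such as bnx2x_update_mfw_dump() might publish this
block through the shmem accessors used elsewhere in the patch (drv_ver and
fw_ver are assumed locals holding the packed 0xMMmmbbdd versions):

	if (SHMEM2_HAS(bp, drv_info)) {
		/* driver load time, seconds since the epoch */
		SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
		SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
		SHMEM2_WR(bp, drv_info.fw_ver, fw_ver);
	}
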
+
 struct ncsi_oem_data {
        u32 driver_version[4];
        struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
@@ -2187,6 +2212,8 @@ struct shmem2_region {
 #define DRV_FLAGS_CAPABILITIES_LOADED_L2        0x00000002
 #define DRV_FLAGS_CAPABILITIES_LOADED_FCOE      0x00000004
 #define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI     0x00000008
+#define DRV_FLAGS_MTU_MASK                     0xffff0000
+#define DRV_FLAGS_MTU_SHIFT                    16
 
        u32 extended_dev_info_shared_cfg_size;
 
@@ -2251,6 +2278,7 @@ struct shmem2_region {
        u32 reserved4;                          /* Offset 0x150 */
        u32 link_attr_sync[PORT_MAX];           /* Offset 0x154 */
        #define LINK_ATTR_SYNC_KR2_ENABLE       0x00000001
+       #define LINK_ATTR_84858                 0x00000002
        #define LINK_SFP_EEPROM_COMP_CODE_MASK  0x0000ff00
        #define LINK_SFP_EEPROM_COMP_CODE_SHIFT          8
        #define LINK_SFP_EEPROM_COMP_CODE_SR    0x00001000
@@ -2268,6 +2296,74 @@ struct shmem2_region {
 
        /* We use indication for each PF (0..3) */
 #define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
+       union { /* For various OEMs */                  /* Offset 0x1a0 */
+               u8 storage_boot_prog[E2_FUNC_MAX];
+       #define STORAGE_BOOT_PROG_MASK                          0x000000FF
+       #define STORAGE_BOOT_PROG_NONE                          0x00000000
+       #define STORAGE_BOOT_PROG_ISCSI_IP_ACQUIRED             0x00000002
+       #define STORAGE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS     0x00000002
+       #define STORAGE_BOOT_PROG_TARGET_FOUND                  0x00000004
+       #define STORAGE_BOOT_PROG_ISCSI_CHAP_SUCCESS            0x00000008
+       #define STORAGE_BOOT_PROG_FCOE_LUN_FOUND                0x00000008
+       #define STORAGE_BOOT_PROG_LOGGED_INTO_TGT               0x00000010
+       #define STORAGE_BOOT_PROG_IMG_DOWNLOADED                0x00000020
+       #define STORAGE_BOOT_PROG_OS_HANDOFF                    0x00000040
+       #define STORAGE_BOOT_PROG_COMPLETED                     0x00000080
+
+               u32 oem_i2c_data_addr;
+       };
+
+       /* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+       /* For PCP values 0-3 use the map lower */
+       /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
+        * 0x0000FF00 - PCP 2, 0x000000FF - PCP 3
+        */
+       u32 c2s_pcp_map_lower[E2_FUNC_MAX];                     /* 0x1a4 */
+
+       /* For PCP values 4-7 use the map upper */
+       /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
+        * 0x0000FF00 - PCP 6, 0x000000FF - PCP 7
+        */
+       u32 c2s_pcp_map_upper[E2_FUNC_MAX];                     /* 0x1b4 */
+
+       /* For PCP default value get the MSB byte of the map default */
+       u32 c2s_pcp_map_default[E2_FUNC_MAX];                   /* 0x1c4 */
+
+       /* FC_NPIV table offset in NVRAM */
+       u32 fc_npiv_nvram_tbl_addr[PORT_MAX];                   /* 0x1d4 */
+
+       /* Shows last method that changed configuration of this device */
+       enum curr_cfg_method_e curr_cfg;                        /* 0x1dc */
+
+       /* Storm FW version, should be kept in the format 0xMMmmbbdd:
+        * MM - Major, mm - Minor, bb - Build, dd - Drop
+        */
+       u32 netproc_fw_ver;                                     /* 0x1e0 */
+
+       /* Option ROM SMASH CLP version */
+       u32 clp_ver;                                            /* 0x1e4 */
+
+       u32 pcie_bus_num;                                       /* 0x1e8 */
+
+       u32 sriov_switch_mode;                                  /* 0x1ec */
+       #define SRIOV_SWITCH_MODE_NONE          0x0
+       #define SRIOV_SWITCH_MODE_VEB           0x1
+       #define SRIOV_SWITCH_MODE_VEPA          0x2
+
+       u8  rsrv2[E2_FUNC_MAX];                                 /* 0x1f0 */
+
+       u32 img_inv_table_addr; /* Address to INV_TABLE_P */    /* 0x1f4 */
+
+       u32 mtu_size[E2_FUNC_MAX];                              /* 0x1f8 */
+
+       u32 os_driver_state[E2_FUNC_MAX];                       /* 0x208 */
+       #define OS_DRIVER_STATE_NOT_LOADED      0 /* not installed */
+       #define OS_DRIVER_STATE_LOADING         1 /* transition state */
+       #define OS_DRIVER_STATE_DISABLED        2 /* installed but disabled */
+       #define OS_DRIVER_STATE_ACTIVE          3 /* installed and active */
+
+       /* mini dump driver info */
+       struct mdump_driver_info drv_info;                      /* 0x218 */
 };
 
 
@@ -2898,8 +2994,8 @@ struct afex_stats {
 };
 
 #define BCM_5710_FW_MAJOR_VERSION                      7
-#define BCM_5710_FW_MINOR_VERSION                      10
-#define BCM_5710_FW_REVISION_VERSION           51
+#define BCM_5710_FW_MINOR_VERSION                      12
+#define BCM_5710_FW_REVISION_VERSION           30
 #define BCM_5710_FW_ENGINEERING_VERSION                0
 #define BCM_5710_FW_COMPILE_FLAGS                      1
 
@@ -3901,7 +3997,11 @@ struct eth_fast_path_rx_cqe {
        __le16 len_on_bd;
        struct parsing_flags pars_flags;
        union eth_sgl_or_raw_data sgl_or_raw_data;
-       __le32 reserved1[7];
+       u8 tunn_type;
+       u8 tunn_inner_hdrs_offset;
+       __le16 reserved1;
+       __le32 tunn_tenant_id;
+       __le32 padding[5];
        u32 marker;
 };
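
The tunnel fields carve metadata out of the old reserved words without
changing the CQE size: 1 + 1 + 2 + 4 + 5*4 = 28 bytes, exactly the seven
reserved u32s they replace. A standalone check with host-endian stand-ins
for the __le types:

	#include <stdint.h>

	struct old_tail { uint32_t reserved1[7]; };

	struct new_tail {
		uint8_t  tunn_type;
		uint8_t  tunn_inner_hdrs_offset;
		uint16_t reserved1;
		uint32_t tunn_tenant_id;
		uint32_t padding[5];
	};

	_Static_assert(sizeof(struct old_tail) == sizeof(struct new_tail),
		       "rx CQE layout must keep its size");
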
 
@@ -4012,8 +4112,8 @@ struct eth_tunnel_data {
        __le16 pseudo_csum;
        u8 ip_hdr_start_inner_w;
        u8 flags;
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER (0x1<<0)
-#define ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER_SHIFT 0
+#define ETH_TUNNEL_DATA_IPV6_OUTER (0x1<<0)
+#define ETH_TUNNEL_DATA_IPV6_OUTER_SHIFT 0
 #define ETH_TUNNEL_DATA_RESERVED (0x7F<<1)
 #define ETH_TUNNEL_DATA_RESERVED_SHIFT 1
 };
@@ -4120,16 +4220,12 @@ struct eth_rss_update_ramrod_data {
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 6
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY (0x1<<7)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY_SHIFT 7
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<8)
-#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 8
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY (0x1<<9)
-#define ETH_RSS_UPDATE_RAMROD_DATA_NVGRE_KEY_ENTROPY_CAPABILITY_SHIFT 9
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY (0x1<<10)
-#define ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY_SHIFT 10
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<11)
-#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 11
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0xF<<12)
-#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 12
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY (0x1<<8)
+#define ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY_SHIFT 8
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<9)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 9
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED (0x3F<<10)
+#define ETH_RSS_UPDATE_RAMROD_DATA_RESERVED_SHIFT 10
        u8 rss_result_mask;
        u8 reserved3;
        __le16 reserved4;
@@ -4314,6 +4410,18 @@ enum eth_tunnel_non_lso_csum_location {
        MAX_ETH_TUNNEL_NON_LSO_CSUM_LOCATION
 };
 
+enum eth_tunn_type {
+       TUNN_TYPE_NONE,
+       TUNN_TYPE_VXLAN,
+       TUNN_TYPE_L2_GRE,
+       TUNN_TYPE_IPV4_GRE,
+       TUNN_TYPE_IPV6_GRE,
+       TUNN_TYPE_L2_GENEVE,
+       TUNN_TYPE_IPV4_GENEVE,
+       TUNN_TYPE_IPV6_GENEVE,
+       MAX_ETH_TUNN_TYPE
+};
+
 /*
  * Tx regular BD structure
  */
@@ -4758,6 +4866,9 @@ struct afex_vif_list_ramrod_data {
        __le16 reserved1;
 };
 
+struct c2s_pri_trans_table_entry {
+       u8 val[MAX_VLAN_PRIORITIES];
+};
 
 /*
  * cfc delete event data
@@ -5246,6 +5357,7 @@ struct flow_control_configuration {
        u8 dont_add_pri_0_en;
        u8 reserved1;
        __le32 reserved2;
+       u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 
@@ -5260,18 +5372,25 @@ struct function_start_data {
        u8 path_id;
        u8 network_cos_mode;
        u8 dmae_cmd_id;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
-       u8 tunn_clss_en;
-       u8 inner_gre_rss_en;
-       u8 sd_accept_mf_clss_fail;
+       u8 no_added_tags;
+       __le16 reserved0;
+       __le32 reserved1;
+       u8 inner_clss_vxlan;
+       u8 inner_clss_l2gre;
+       u8 inner_clss_l2geneve;
+       u8 inner_rss;
        __le16 vxlan_dst_port;
+       __le16 geneve_dst_port;
+       u8 sd_accept_mf_clss_fail;
+       u8 sd_accept_mf_clss_fail_match_ethtype;
        __le16 sd_accept_mf_clss_fail_ethtype;
        __le16 sd_vlan_eth_type;
        u8 sd_vlan_force_pri_flg;
        u8 sd_vlan_force_pri_val;
-       u8 sd_accept_mf_clss_fail_match_ethtype;
-       u8 no_added_tags;
+       u8 c2s_pri_tt_valid;
+       u8 c2s_pri_default;
+       u8 reserved2[6];
+       struct c2s_pri_trans_table_entry c2s_pri_trans_table;
 };
 
 struct function_update_data {
@@ -5289,11 +5408,12 @@ struct function_update_data {
        u8 tx_switch_suspend;
        u8 echo;
        u8 update_tunn_cfg_flg;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
-       u8 tunn_clss_en;
-       u8 inner_gre_rss_en;
+       u8 inner_clss_vxlan;
+       u8 inner_clss_l2gre;
+       u8 inner_clss_l2geneve;
+       u8 inner_rss;
        __le16 vxlan_dst_port;
+       __le16 geneve_dst_port;
        u8 sd_vlan_force_pri_change_flg;
        u8 sd_vlan_force_pri_flg;
        u8 sd_vlan_force_pri_val;
@@ -5302,6 +5422,8 @@ struct function_update_data {
        u8 reserved1;
        __le16 sd_vlan_tag;
        __le16 sd_vlan_eth_type;
+       __le16 reserved0;
+       __le32 reserved2;
 };
 
 /*
@@ -5330,15 +5452,6 @@ struct fw_version {
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
-
-/* GRE Tunnel Mode */
-enum gre_tunnel_type {
-       NVGRE_TUNNEL,
-       L2GRE_TUNNEL,
-       IPGRE_TUNNEL,
-       MAX_GRE_TUNNEL_TYPE
-};
-
 /*
  * Dynamic Host-Coalescing - Driver(host) counters
  */
index d6e1975b7b691ab51ab536a7c9ec413601f24b6c..46ee2c01f4c5167209c23f3e1440b82630327cea 100644 (file)
@@ -1,7 +1,9 @@
-/* bnx2x_init.h: Broadcom Everest network driver.
+/* bnx2x_init.h: QLogic Everest network driver.
 *               Structures and macros needed during the initialization.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 5669ed2e87d0039ab02c3bc70636359e49c811e6..1835d2e451c0139e272e400774b0d6d19487df98 100644 (file)
@@ -1,8 +1,10 @@
-/* bnx2x_init_ops.h: Broadcom Everest network driver.
+/* bnx2x_init_ops.h: QLogic Everest network driver.
  *               Static functions needed during the initialization.
  *               This file is "included" in bnx2x_main.c.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index a0b03c27e0a302c08fd1a78c5dbd2dd7606ae16a..d946bba43726f94b0d8a62973978a19d07959390 100644 (file)
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
@@ -9652,6 +9654,13 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 /******************************************************************/
 /*             BCM8481/BCM84823/BCM84833 PHY SECTION             */
 /******************************************************************/
+static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
+{
+       return ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
+               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858));
+}
+
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
                                            struct bnx2x *bp,
                                            u8 port)
@@ -9666,8 +9675,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
        };
        u16 fw_ver1;
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (bnx2x_is_8483x_8485x(phy)) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
                bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
                                phy->ver_addr);
@@ -9749,8 +9757,7 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
                bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
                                 reg_set[i].val);
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+       if (bnx2x_is_8483x_8485x(phy))
                offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
        else
                offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
@@ -9768,8 +9775,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        switch (action) {
        case PHY_INIT:
-               if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-                   (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+               if (!bnx2x_is_8483x_8485x(phy)) {
                        /* Save spirom version */
                        bnx2x_save_848xx_spirom_version(phy, bp, params->port);
                }
@@ -9901,8 +9907,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        /* Always write this if this is not 84833/4.
         * For 84833/4, write it only when it's a forced speed.
         */
-       if (((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-            (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) ||
+       if (!bnx2x_is_8483x_8485x(phy) ||
            ((autoneg_val & (1<<12)) == 0))
                bnx2x_cl45_write(bp, phy,
                         MDIO_AN_DEVAD,
@@ -9949,8 +9954,86 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
        return bnx2x_848xx_cmn_config_init(phy, params, vars);
 }
 
-#define PHY84833_CMDHDLR_WAIT 300
-#define PHY84833_CMDHDLR_MAX_ARGS 5
+#define PHY848xx_CMDHDLR_WAIT 300
+#define PHY848xx_CMDHDLR_MAX_ARGS 5
+
+static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               u16 fw_cmd,
+                               u16 cmd_args[], int argc)
+{
+       int idx;
+       u16 val;
+       struct bnx2x *bp = params->bp;
+
+       /* Step 1: Poll the STATUS register to see whether the previous command
+        * is still in progress or the system is busy (CMD_IN_PROGRESS or
+        * SYSTEM_BUSY). If so, poll again until the previous command completes
+        * and the system is free to accept a new command.
+        */
+
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
+               if ((val != PHY84858_STATUS_CMD_IN_PROGRESS) &&
+                   (val != PHY84858_STATUS_CMD_SYSTEM_BUSY))
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if (idx >= PHY848xx_CMDHDLR_WAIT) {
+               DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+               return -EINVAL;
+       }
+
+       /* Step2: If any parameters are required for the function, write them
+        * to the required DATA registers
+        */
+
+       for (idx = 0; idx < argc; idx++) {
+               bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                                MDIO_848xx_CMD_HDLR_DATA1 + idx,
+                                cmd_args[idx]);
+       }
+
+       /* Step3: When the firmware is ready for commands, write the 'Command
+        * code' to the CMD register
+        */
+       bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+                        MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+
+       /* Step4: Once the command has been written, poll the STATUS register
+        * to check whether the command has completed (CMD_COMPLETE_PASS or
+        * CMD_COMPLETE_ERROR).
+        */
+
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
+               if ((val == PHY84858_STATUS_CMD_COMPLETE_PASS) ||
+                   (val == PHY84858_STATUS_CMD_COMPLETE_ERROR))
+                       break;
+               usleep_range(1000, 2000);
+       }
+       if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+           (val == PHY84858_STATUS_CMD_COMPLETE_ERROR)) {
+               DP(NETIF_MSG_LINK, "FW cmd failed.\n");
+               return -EINVAL;
+       }
+       /* Step5: Once the command has completed, read the specified DATA
+        * registers for any saved results for the command, if applicable
+        */
+
+       /* Gather returning data */
+       for (idx = 0; idx < argc; idx++) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
+                               &cmd_args[idx]);
+       }
+
+       return 0;
+}
+
 static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
                                struct link_params *params, u16 fw_cmd,
                                u16 cmd_args[], int argc)
@@ -9960,16 +10043,16 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        /* Write CMD_OPEN_OVERRIDE to STATUS reg */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_STATUS,
+                       MDIO_848xx_CMD_HDLR_STATUS,
                        PHY84833_STATUS_CMD_OPEN_OVERRIDE);
-       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_STATUS, &val);
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
                if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
                        break;
                usleep_range(1000, 2000);
        }
-       if (idx >= PHY84833_CMDHDLR_WAIT) {
+       if (idx >= PHY848xx_CMDHDLR_WAIT) {
                DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
                return -EINVAL;
        }
@@ -9977,42 +10060,62 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
        /* Prepare argument(s) and issue command */
        for (idx = 0; idx < argc; idx++) {
                bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
                                cmd_args[idx]);
        }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
-       for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
+                       MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
+       for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_STATUS, &val);
+                               MDIO_848xx_CMD_HDLR_STATUS, &val);
                if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
-                       (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
+                   (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
                        break;
                usleep_range(1000, 2000);
        }
-       if ((idx >= PHY84833_CMDHDLR_WAIT) ||
-               (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+       if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
+           (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
                DP(NETIF_MSG_LINK, "FW cmd failed.\n");
                return -EINVAL;
        }
        /* Gather returning data */
        for (idx = 0; idx < argc; idx++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
-                               MDIO_84833_CMD_HDLR_DATA1 + idx,
+                               MDIO_848xx_CMD_HDLR_DATA1 + idx,
                                &cmd_args[idx]);
        }
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
-                       MDIO_84833_CMD_HDLR_STATUS,
+                       MDIO_848xx_CMD_HDLR_STATUS,
                        PHY84833_STATUS_CMD_CLEAR_COMPLETE);
        return 0;
 }
 
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
-                                  struct link_params *params,
-                                  struct link_vars *vars)
+static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
+                               struct link_params *params,
+                               u16 fw_cmd,
+                               u16 cmd_args[], int argc)
+{
+       struct bnx2x *bp = params->bp;
+
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) ||
+           (REG_RD(bp, params->shmem2_base +
+                   offsetof(struct shmem2_region,
+                            link_attr_sync[params->port])) &
+            LINK_ATTR_84858)) {
+               return bnx2x_84858_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+                                           argc);
+       } else {
+               return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
+                                           argc);
+       }
+}
+
+static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
+                                    struct link_params *params,
+                                    struct link_vars *vars)
 {
        u32 pair_swap;
-       u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+       u16 data[PHY848xx_CMDHDLR_MAX_ARGS];
        int status;
        struct bnx2x *bp = params->bp;
 
@@ -10028,8 +10131,9 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
        /* Only the second argument is used for this command */
        data[1] = (u16)pair_swap;
 
-       status = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
+       status = bnx2x_848xx_cmd_hdlr(phy, params,
+                                     PHY848xx_CMD_SET_PAIR_SWAP, data,
+                                     PHY848xx_CMDHDLR_MAX_ARGS);
        if (status == 0)
                DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
 
@@ -10118,8 +10222,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
 
        /* Prevent Phy from working in EEE and advertising it */
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+       rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
                DP(NETIF_MSG_LINK, "EEE disable failed.\n");
                return rc;
@@ -10136,8 +10240,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
        struct bnx2x *bp = params->bp;
        u16 cmd_args = 1;
 
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+       rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
        if (rc) {
                DP(NETIF_MSG_LINK, "EEE enable failed.\n");
                return rc;
@@ -10155,7 +10259,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        u8 port, initialize = 1;
        u16 val;
        u32 actual_phy_selection;
-       u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
+       u16 cmd_args[PHY848xx_CMDHDLR_MAX_ARGS];
        int rc = 0;
 
        usleep_range(1000, 2000);
@@ -10180,8 +10284,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
-           (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (!bnx2x_is_8483x_8485x(phy)) {
                /* BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
                 */
@@ -10192,7 +10295,19 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
                vars->line_speed = temp;
        }
+       /* Check if this is actually BCM84858 */
+       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+               u16 hw_rev;
 
+               bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+                               MDIO_AN_REG_848xx_ID_MSB, &hw_rev);
+               if (hw_rev == BCM84858_PHY_ID) {
+                       params->link_attr_sync |= LINK_ATTR_84858;
+                       bnx2x_update_link_attr(params, params->link_attr_sync);
+               }
+       }
+
+       /* Set dual-media configuration according to configuration */
        bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                        MDIO_CTL_REG_84823_MEDIA, &val);
        val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
@@ -10237,18 +10352,17 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
-               bnx2x_84833_pair_swap_cfg(phy, params, vars);
+       if (bnx2x_is_8483x_8485x(phy)) {
+               bnx2x_848xx_pair_swap_cfg(phy, params, vars);
 
                /* Keep AutogrEEEn disabled. */
                cmd_args[0] = 0x0;
                cmd_args[1] = 0x0;
                cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
                cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-               rc = bnx2x_84833_cmd_hdlr(phy, params,
-                       PHY84833_CMD_SET_EEE_MODE, cmd_args,
-                       PHY84833_CMDHDLR_MAX_ARGS);
+               rc = bnx2x_848xx_cmd_hdlr(phy, params,
+                                         PHY848xx_CMD_SET_EEE_MODE, cmd_args,
+                                         PHY848xx_CMDHDLR_MAX_ARGS);
                if (rc)
                        DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
        }
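
Several hunks above replace the explicit BCM84833/BCM84834 type comparisons with a single bnx2x_is_8483x_8485x() helper so the new BCM84858 is picked up everywhere at once. The helper itself is added in an earlier part of bnx2x_link.c that is not shown in this excerpt; based on the PHY types these hunks reference, it presumably reduces to something like:

/* Plausible sketch of the helper used above - not copied from the patch */
static int bnx2x_is_8483x_8485x(struct bnx2x_phy *phy)
{
        return (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) ||
               (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858);
}
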
@@ -10302,8 +10416,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
        }
 
-       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-           (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) {
+       if (bnx2x_is_8483x_8485x(phy)) {
                /* Bring PHY out of super isolate mode as the final step. */
                bnx2x_cl45_read_and_write(bp, phy,
                                          MDIO_CTL_DEVAD,
@@ -10435,8 +10548,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
                                LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
                /* Determine if EEE was negotiated */
-               if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-                   (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
+               if (bnx2x_is_8483x_8485x(phy))
                        bnx2x_eee_an_resolve(phy, params, vars);
        }
 
@@ -11842,6 +11954,40 @@ static const struct bnx2x_phy phy_84834 = {
        .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
 };
 
+static const struct bnx2x_phy phy_84858 = {
+       .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858,
+       .addr           = 0xff,
+       .def_md_devad   = 0,
+       .flags          = FLAGS_FAN_FAILURE_DET_REQ |
+                           FLAGS_REARM_LATCH_SIGNAL,
+       .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
+       .mdio_ctrl      = 0,
+       .supported      = (SUPPORTED_100baseT_Half |
+                          SUPPORTED_100baseT_Full |
+                          SUPPORTED_1000baseT_Full |
+                          SUPPORTED_10000baseT_Full |
+                          SUPPORTED_TP |
+                          SUPPORTED_Autoneg |
+                          SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .media_type     = ETH_PHY_BASE_T,
+       .ver_addr       = 0,
+       .req_flow_ctrl  = 0,
+       .req_line_speed = 0,
+       .speed_cap_mask = 0,
+       .req_duplex     = 0,
+       .rsrv           = 0,
+       .config_init    = (config_init_t)bnx2x_848x3_config_init,
+       .read_status    = (read_status_t)bnx2x_848xx_read_status,
+       .link_reset     = (link_reset_t)bnx2x_848x3_link_reset,
+       .config_loopback = (config_loopback_t)NULL,
+       .format_fw_ver  = (format_fw_ver_t)bnx2x_848xx_format_ver,
+       .hw_reset       = (hw_reset_t)bnx2x_84833_hw_reset_phy,
+       .set_link_led   = (set_link_led_t)bnx2x_848xx_set_link_led,
+       .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
+};
+
 static const struct bnx2x_phy phy_54618se = {
        .type           = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
        .addr           = 0xff,
@@ -12128,6 +12274,9 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
                *phy = phy_84834;
                break;
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
+               *phy = phy_84858;
+               break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
                *phy = phy_54618se;
@@ -12184,9 +12333,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
-       if (((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
-            (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834)) &&
-           (phy->ver_addr)) {
+       if (bnx2x_is_8483x_8485x(phy) && (phy->ver_addr)) {
                /* Remove 100Mb link supported for BCM84833/4 when phy fw
                 * version lower than or equal to 1.39
                 */
@@ -13281,6 +13428,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                break;
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
        case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834:
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858:
                /* GPIO3's are linked, and so both need to be toggled
                 * to obtain required 2us pulse.
                 */
index d9cce4c3899b7b9d388cf28a6f4bf0feece3c4b2..b7d251108c19f56345304d88097619a58b08b8e4 100644 (file)
@@ -1,13 +1,15 @@
 /* Copyright 2008-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
  * consent.
  *
  * Written by Yaniv Rosner
index c27af12314ed29ae19e73a9c00f56c062a5aa830..31c63aa2252166a4a9fb5d811735d764e8d1d082 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_main.c: Broadcom Everest network driver.
+/* bnx2x_main.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define TX_TIMEOUT             (5*HZ)
 
 static char version[] =
-       "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+       "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II "
+MODULE_DESCRIPTION("QLogic "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
@@ -163,27 +165,27 @@ enum bnx2x_board_type {
 static struct {
        char *name;
 } board_info[] = {
-       [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
-       [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
-       [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
-       [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
-       [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
-       [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
-       [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
-       [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
-       [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
-       [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
-       [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
-       [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
-       [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
-       [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
-       [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-       [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
-       [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
-       [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
-       [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
-       [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
-       [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
+       [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
+       [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
+       [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
+       [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
+       [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
+       [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
+       [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
+       [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
+       [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
+       [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
+       [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
+       [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
+       [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
+       [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
+       [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+       [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
+       [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
+       [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
+       [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
+       [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
+       [BCM57811_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
 };
 
 #ifndef PCI_DEVICE_ID_NX2_57710
@@ -2916,7 +2918,7 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
        func_params.f_obj = &bp->func_obj;
        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
 
-       if (IS_MF_UFP(bp)) {
+       if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
                int func = BP_ABS_FUNC(bp);
                u32 val;
 
@@ -2943,16 +2945,16 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
                        BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
                                  bp->mf_ov);
                        goto fail;
+               } else {
+                       DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
+                          bp->mf_ov);
                }
-
-               DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
-
-               bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
-
-               return;
+       } else {
+               goto fail;
        }
 
-       /* not supported by SW yet */
+       bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
+       return;
 fail:
        bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
 }
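
The rewrite above removes the early return that used to sit inside the UFP branch: UFP and the new BD mode now share one success path, and every other multi-function mode drops straight to the failure acknowledgement. Stripped of the shmem reads and register writes, the resulting flow of bnx2x_handle_update_svid_cmd() is roughly the following sketch; "programming_failed" is a placeholder for the elided error checks, not a real variable:

/* Condensed view of the post-patch flow (details elided) */
if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
        /* read the new S-tag from shmem and program it into HW/FW */
        if (programming_failed)
                goto fail;
        DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n", bp->mf_ov);
} else {
        goto fail;                      /* unsupported MF mode */
}
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
return;
fail:
bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
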
@@ -3065,7 +3067,7 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
        storm_memset_func_en(bp, p->func_id, 1);
 
        /* spq */
-       if (p->func_flgs & FUNC_FLG_SPQ) {
+       if (p->spq_active) {
                storm_memset_spq_addr(bp, p->spq_map, p->func_id);
                REG_WR(bp, XSEM_REG_FAST_MEMORY +
                       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
@@ -3281,7 +3283,6 @@ static void bnx2x_pf_init(struct bnx2x *bp)
 {
        struct bnx2x_func_init_params func_init = {0};
        struct event_ring_data eq_data = { {0} };
-       u16 flags;
 
        if (!CHIP_IS_E1x(bp)) {
                /* reset IGU PF statistics: MSIX + ATTN */
@@ -3298,15 +3299,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
        }
 
-       /* function setup flags */
-       flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
-
-       /* This flag is relevant for E1x only.
-        * E2 doesn't have a TPA configuration in a function level.
-        */
-       flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
-
-       func_init.func_flgs = flags;
+       func_init.spq_active = true;
        func_init.pf_id = BP_FUNC(bp);
        func_init.func_id = BP_FUNC(bp);
        func_init.spq_map = bp->spq_mapping;
@@ -3707,6 +3700,34 @@ out:
           ethver, iscsiver, fcoever);
 }
 
+void bnx2x_update_mfw_dump(struct bnx2x *bp)
+{
+       struct timeval epoc;
+       u32 drv_ver;
+       u32 valid_dump;
+
+       if (!SHMEM2_HAS(bp, drv_info))
+               return;
+
+       /* Update Driver load time */
+       do_gettimeofday(&epoc);
+       SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+
+       drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
+       SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
+
+       SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
+       /* Check & notify On-Chip dump. */
+       valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
+
+       if (valid_dump & FIRST_DUMP_VALID)
+               DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
+
+       if (valid_dump & SECOND_DUMP_VALID)
+               DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
+}
+
 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
 {
        u32 cmd_ok, cmd_fail;
@@ -5273,6 +5294,10 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
                else
                        vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
+               break;
+       case BNX2X_FILTER_VLAN_PENDING:
+               DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
+               vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
                break;
        case BNX2X_FILTER_MCAST_PENDING:
                DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
@@ -5568,6 +5593,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                      BNX2X_STATE_OPEN):
                case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
                      BNX2X_STATE_OPENING_WAIT4_PORT):
+               case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+                     BNX2X_STATE_CLOSING_WAIT4_HALT):
                        cid = elem->message.data.eth_event.echo &
                                BNX2X_SWCID_MASK;
                        DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
@@ -5585,7 +5612,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
                      BNX2X_STATE_DIAG):
                case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
                      BNX2X_STATE_CLOSING_WAIT4_HALT):
-                       DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+                       DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
                        bnx2x_handle_classification_eqe(bp, elem);
                        break;
 
@@ -6173,6 +6200,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+               if (bp->accept_any_vlan) {
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+               }
+
                break;
        case BNX2X_RX_MODE_ALLMULTI:
                __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
@@ -6184,6 +6216,11 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
 
+               if (bp->accept_any_vlan) {
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+                       __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+               }
+
                break;
        case BNX2X_RX_MODE_PROMISC:
                /* According to definition of SI mode, iface in promisc mode
@@ -6204,18 +6241,15 @@ static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
                else
                        __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
 
+               __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+               __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
+
                break;
        default:
                BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
                return -EINVAL;
        }
 
-       /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
-       if (rx_mode != BNX2X_RX_MODE_NONE) {
-               __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
-               __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
-       }
-
        return 0;
 }
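
Together with the block deleted after the switch, these hunks make BNX2X_ACCEPT_ANY_VLAN conditional: promiscuous mode still accepts every VLAN unconditionally, while the normal and all-multicast modes only do so when the driver has run out of VLAN filter credits and has set bp->accept_any_vlan (see the new ndo_vlan_rx_add_vid/kill_vid handlers later in this patch). A minimal standalone summary of that policy, assuming the usual BNX2X_RX_MODE_* values, is:

/* Illustrative summary only - not driver code */
static bool rx_mode_accepts_any_vlan(int rx_mode, bool accept_any_vlan)
{
        switch (rx_mode) {
        case BNX2X_RX_MODE_PROMISC:
                return true;            /* always VLAN transparent */
        case BNX2X_RX_MODE_NORMAL:
        case BNX2X_RX_MODE_ALLMULTI:
                return accept_any_vlan; /* only while over VLAN credit */
        default:
                return false;           /* e.g. BNX2X_RX_MODE_NONE */
        }
}
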
 
@@ -7429,6 +7463,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
        } else
                BNX2X_ERR("Bootcode is missing - can not initialize link\n");
 
+       if (SHMEM2_HAS(bp, netproc_fw_ver))
+               SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
+
        return 0;
 }
 
@@ -8406,6 +8443,42 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
        return rc;
 }
 
+int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
+                      struct bnx2x_vlan_mac_obj *obj, bool set,
+                      unsigned long *ramrod_flags)
+{
+       int rc;
+       struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+       memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+       /* Fill general parameters */
+       ramrod_param.vlan_mac_obj = obj;
+       ramrod_param.ramrod_flags = *ramrod_flags;
+
+       /* Fill a user request section if needed */
+       if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+               ramrod_param.user_req.u.vlan.vlan = vlan;
+               /* Set the command: ADD or DEL */
+               if (set)
+                       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+               else
+                       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+       }
+
+       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+
+       if (rc == -EEXIST) {
+               /* Do not treat adding same vlan as error. */
+               DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
+               rc = 0;
+       } else if (rc < 0) {
+               BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
+       }
+
+       return rc;
+}
+
 int bnx2x_del_all_macs(struct bnx2x *bp,
                       struct bnx2x_vlan_mac_obj *mac_obj,
                       int mac_type, bool wait_for_comp)
@@ -11678,7 +11751,7 @@ static void validate_set_si_mode(struct bnx2x *bp)
 static int bnx2x_get_hwinfo(struct bnx2x *bp)
 {
        int /*abs*/func = BP_ABS_FUNC(bp);
-       int vn;
+       int vn, mfw_vn;
        u32 val = 0, val2 = 0;
        int rc = 0;
 
@@ -11768,6 +11841,7 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
        bp->mf_mode = 0;
        bp->mf_sub_mode = 0;
        vn = BP_VN(bp);
+       mfw_vn = BP_FW_MB_IDX(bp);
 
        if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
                BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
@@ -11824,6 +11898,31 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
                                } else
                                        BNX2X_DEV_INFO("illegal OV for SD\n");
                                break;
+                       case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
+                               bp->mf_mode = MULTI_FUNCTION_SD;
+                               bp->mf_sub_mode = SUB_MF_MODE_BD;
+                               bp->mf_config[vn] =
+                                       MF_CFG_RD(bp,
+                                                 func_mf_config[func].config);
+
+                               if (SHMEM2_HAS(bp, mtu_size)) {
+                                       int mtu_idx = BP_FW_MB_IDX(bp);
+                                       u16 mtu_size;
+                                       u32 mtu;
+
+                                       mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
+                                       mtu_size = (u16)mtu;
+                                       DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
+                                          mtu_size, mtu);
+
+                                       /* if valid: update device mtu */
+                                       if (((mtu_size + ETH_HLEN) >=
+                                            ETH_MIN_PACKET_SIZE) &&
+                                           (mtu_size <=
+                                            ETH_MAX_JUMBO_PACKET_SIZE))
+                                               bp->dev->mtu = mtu_size;
+                               }
+                               break;
                        case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
                                bp->mf_mode = MULTI_FUNCTION_SD;
                                bp->mf_sub_mode = SUB_MF_MODE_UFP;
@@ -11871,9 +11970,10 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
 
                                BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
                                               func, bp->mf_ov, bp->mf_ov);
-                       } else if (bp->mf_sub_mode == SUB_MF_MODE_UFP) {
+                       } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
+                                  (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
                                dev_err(&bp->pdev->dev,
-                                       "Unexpected - no valid MF OV for func %d in UFP mode\n",
+                                       "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
                                        func);
                                bp->path_has_ovlan = true;
                        } else {
@@ -12078,6 +12178,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->drv_info_mutex);
        sema_init(&bp->stats_lock, 1);
        bp->drv_info_mng_owner = false;
+       INIT_LIST_HEAD(&bp->vlan_reg);
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
        INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -12596,6 +12697,169 @@ static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
        return vxlan_features_check(skb, features);
 }
 
+static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
+{
+       int rc;
+
+       if (IS_PF(bp)) {
+               unsigned long ramrod_flags = 0;
+
+               __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+               rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
+                                       add, &ramrod_flags);
+       } else {
+               rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
+       }
+
+       return rc;
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+       struct bnx2x_vlan_entry *vlan;
+       int rc = 0;
+
+       if (!bp->vlan_cnt) {
+               DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
+               return 0;
+       }
+
+       list_for_each_entry(vlan, &bp->vlan_reg, link) {
+               /* Prepare for cleanup in case of errors */
+               if (rc) {
+                       vlan->hw = false;
+                       continue;
+               }
+
+               if (!vlan->hw)
+                       continue;
+
+               DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+
+               rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+               if (rc) {
+                       BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
+                       vlan->hw = false;
+                       rc = -EINVAL;
+                       continue;
+               }
+       }
+
+       return rc;
+}
+
+static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_vlan_entry *vlan;
+       bool hw = false;
+       int rc = 0;
+
+       if (!netif_running(bp->dev)) {
+               DP(NETIF_MSG_IFUP,
+                  "Ignoring VLAN configuration the interface is down\n");
+               return -EFAULT;
+       }
+
+       DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
+
+       vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan)
+               return -ENOMEM;
+
+       bp->vlan_cnt++;
+       if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
+               DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
+               bp->accept_any_vlan = true;
+               if (IS_PF(bp))
+                       bnx2x_set_rx_mode_inner(bp);
+               else
+                       bnx2x_vfpf_storm_rx_mode(bp);
+       } else if (bp->vlan_cnt <= bp->vlan_credit) {
+               rc = __bnx2x_vlan_configure_vid(bp, vid, true);
+               hw = true;
+       }
+
+       vlan->vid = vid;
+       vlan->hw = hw;
+
+       if (!rc) {
+               list_add(&vlan->link, &bp->vlan_reg);
+       } else {
+               bp->vlan_cnt--;
+               kfree(vlan);
+       }
+
+       DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+
+       return rc;
+}
+
+static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_vlan_entry *vlan;
+       int rc = 0;
+
+       if (!netif_running(bp->dev)) {
+               DP(NETIF_MSG_IFUP,
+                  "Ignoring VLAN configuration the interface is down\n");
+               return -EFAULT;
+       }
+
+       DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
+
+       if (!bp->vlan_cnt) {
+               BNX2X_ERR("Unable to kill VLAN %d\n", vid);
+               return -EINVAL;
+       }
+
+       list_for_each_entry(vlan, &bp->vlan_reg, link)
+               if (vlan->vid == vid)
+                       break;
+
+       if (vlan->vid != vid) {
+               BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
+               return -EINVAL;
+       }
+
+       if (vlan->hw)
+               rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+
+       list_del(&vlan->link);
+       kfree(vlan);
+
+       bp->vlan_cnt--;
+
+       if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
+               /* Configure all non-configured entries */
+               list_for_each_entry(vlan, &bp->vlan_reg, link) {
+                       if (vlan->hw)
+                               continue;
+
+                       rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
+                       if (rc) {
+                               BNX2X_ERR("Unable to config VLAN %d\n",
+                                         vlan->vid);
+                               continue;
+                       }
+                       DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
+                          vlan->vid);
+                       vlan->hw = true;
+               }
+               DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
+               bp->accept_any_vlan = false;
+               if (IS_PF(bp))
+                       bnx2x_set_rx_mode_inner(bp);
+               else
+                       bnx2x_vfpf_storm_rx_mode(bp);
+       }
+
+       DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
+
+       return rc;
+}
+
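
This block implements the actual VLAN filtering offload. Every VID requested by the stack is kept on bp->vlan_reg, but only up to bp->vlan_credit of them are programmed into hardware (vlan->hw); once the count exceeds the credit, the driver sets bp->accept_any_vlan and reprograms the RX mode so no traffic is dropped, and when enough VIDs are removed it programs the remaining unfiltered entries and clears the flag again. A stripped-down, user-space model of just the credit bookkeeping (field names mirror the driver, but nothing here is driver code):

#include <stdbool.h>

struct vlan_filter_state {
        unsigned int cnt;               /* VIDs requested by the stack  */
        unsigned int credit;            /* VIDs the hardware can filter */
        bool accept_any_vlan;           /* true while cnt > credit      */
};

static void model_vlan_add(struct vlan_filter_state *s)
{
        s->cnt++;
        if (s->cnt > s->credit)
                s->accept_any_vlan = true;      /* fall back to accept-all */
}

static void model_vlan_kill(struct vlan_filter_state *s)
{
        s->cnt--;
        if (s->cnt <= s->credit)
                s->accept_any_vlan = false;     /* HW filtering suffices again */
}
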
 static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_open               = bnx2x_open,
        .ndo_stop               = bnx2x_close,
@@ -12609,6 +12873,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_fix_features       = bnx2x_fix_features,
        .ndo_set_features       = bnx2x_set_features,
        .ndo_tx_timeout         = bnx2x_tx_timeout,
+       .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2x,
 #endif
@@ -12819,6 +13085,18 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
 
+       /* VF with OLD Hypervisor or old PF do not support filtering */
+       if (IS_PF(bp)) {
+               if (CHIP_IS_E1x(bp))
+                       bp->accept_any_vlan = true;
+               else
+                       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#ifdef CONFIG_BNX2X_SRIOV
+       } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+               dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+       }
+
        dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
        dev->features |= NETIF_F_HIGHDMA;
 
@@ -13561,6 +13839,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
 
        bnx2x_register_phc(bp);
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
        return 0;
 
 init_one_exit:
@@ -13623,6 +13904,7 @@ static void __bnx2x_remove(struct pci_dev *pdev,
        /* Power on: we can't let PCI layer write to us while we are in D3 */
        if (IS_PF(bp)) {
                bnx2x_set_power_state(bp, PCI_D0);
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
 
                /* Set endianity registers to reset values in case next driver
                 * boots in different endianty environment.
index caf1aef651eb0bd38081887b68315597c2a5dc16..a91ccbf363451585bfd51e4203180fa45b9f8bcd 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+/* bnx2x_mfw_req.h: Qlogic Everest network driver.
  *
  * Copyright (c) 2012-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 49d511092c82fc514832fc4aa30078a08a6f19ad..4dead49bd5cb0866ff89db2c5f2e149536b1ea3c 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_reg.h: Broadcom Everest network driver.
+/* bnx2x_reg.h: Qlogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 /* [RW 1] When this bit is set; the LLH will expect all packets to be with
    e1hov */
 #define NIG_REG_LLH_E1HOV_MODE                                  0x160d8
+/* [RW 16] Outer VLAN type identifier for multi-function mode. In non
+ * multi-function mode; it will hold the inner VLAN type. Typically 0x8100.
+ */
+#define NIG_REG_LLH_E1HOV_TYPE_1                                0x16028
 /* [RW 1] When this bit is set; the LLH will classify the packet before
    sending it to the BRB or calculating WoL on it. */
 #define NIG_REG_LLH_MF_MODE                                     0x16024
 #define PBF_REG_TQ_OCCUPANCY_Q0                                         0x1403ac
 /* [R 13] Number of 8 bytes lines occupied in the task queue of queue 1. */
 #define PBF_REG_TQ_OCCUPANCY_Q1                                         0x1403b0
-#define PB_REG_CONTROL                                          0
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PBF_REG_VLAN_TYPE_0                                     0x15c06c
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK                                      0x28
 /* [R 2] Interrupt register #0 read */
 #define PRS_REG_TCM_CURRENT_CREDIT                              0x40160
 /* [R 8] debug only: TSDM current credit. Transaction based. */
 #define PRS_REG_TSDM_CURRENT_CREDIT                             0x4015c
+/* [RW 16] One of 8 values that should be compared to type in Ethernet
+ * parsing. If there is a match; the field after Ethernet is the first VLAN.
+ * Reset value is 0x8100 which is the standard VLAN type. Note that when
+ * checking second VLAN; type is compared only to 0x8100.
+ */
+#define PRS_REG_VLAN_TYPE_0                                     0x401a8
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT                    (0x1<<19)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF                     (0x1<<20)
 #define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN                  (0x1<<22)
@@ -7240,6 +7257,9 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_AN_REG_8481_LEGACY_MII_CTRL       0xffe0
 #define MDIO_AN_REG_8481_MII_CTRL_FORCE_1G     0x40
 #define MDIO_AN_REG_8481_LEGACY_MII_STATUS     0xffe1
+#define MDIO_AN_REG_848xx_ID_MSB               0xffe2
+#define BCM84858_PHY_ID                                        0x600d
+#define MDIO_AN_REG_848xx_ID_LSB               0xffe3
 #define MDIO_AN_REG_8481_LEGACY_AN_ADV         0xffe4
 #define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION   0xffe6
 #define MDIO_AN_REG_8481_1000T_CTRL            0xffe9
@@ -7283,31 +7303,31 @@ Theotherbitsarereservedandshouldbezero*/
 #define MDIO_84833_TOP_CFG_FW_NO_EEE           0x1f81
 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1                        0x401a
 #define MDIO_84833_SUPER_ISOLATE               0x8000
-/* These are mailbox register set used by 84833. */
-#define MDIO_84833_TOP_CFG_SCRATCH_REG0                        0x4005
-#define MDIO_84833_TOP_CFG_SCRATCH_REG1                        0x4006
-#define MDIO_84833_TOP_CFG_SCRATCH_REG2                        0x4007
-#define MDIO_84833_TOP_CFG_SCRATCH_REG3                        0x4008
-#define MDIO_84833_TOP_CFG_SCRATCH_REG4                        0x4009
-#define MDIO_84833_TOP_CFG_SCRATCH_REG26               0x4037
-#define MDIO_84833_TOP_CFG_SCRATCH_REG27               0x4038
-#define MDIO_84833_TOP_CFG_SCRATCH_REG28               0x4039
-#define MDIO_84833_TOP_CFG_SCRATCH_REG29               0x403a
-#define MDIO_84833_TOP_CFG_SCRATCH_REG30               0x403b
-#define MDIO_84833_TOP_CFG_SCRATCH_REG31               0x403c
-#define MDIO_84833_CMD_HDLR_COMMAND    MDIO_84833_TOP_CFG_SCRATCH_REG0
-#define MDIO_84833_CMD_HDLR_STATUS     MDIO_84833_TOP_CFG_SCRATCH_REG26
-#define MDIO_84833_CMD_HDLR_DATA1      MDIO_84833_TOP_CFG_SCRATCH_REG27
-#define MDIO_84833_CMD_HDLR_DATA2      MDIO_84833_TOP_CFG_SCRATCH_REG28
-#define MDIO_84833_CMD_HDLR_DATA3      MDIO_84833_TOP_CFG_SCRATCH_REG29
-#define MDIO_84833_CMD_HDLR_DATA4      MDIO_84833_TOP_CFG_SCRATCH_REG30
-#define MDIO_84833_CMD_HDLR_DATA5      MDIO_84833_TOP_CFG_SCRATCH_REG31
+/* These are mailbox register set used by 84833/84858. */
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG0                        0x4005
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG1                        0x4006
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG2                        0x4007
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG3                        0x4008
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG4                        0x4009
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG26               0x4037
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG27               0x4038
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG28               0x4039
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG29               0x403a
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG30               0x403b
+#define MDIO_848xx_TOP_CFG_SCRATCH_REG31               0x403c
+#define MDIO_848xx_CMD_HDLR_COMMAND    (MDIO_848xx_TOP_CFG_SCRATCH_REG0)
+#define MDIO_848xx_CMD_HDLR_STATUS     (MDIO_848xx_TOP_CFG_SCRATCH_REG26)
+#define MDIO_848xx_CMD_HDLR_DATA1      (MDIO_848xx_TOP_CFG_SCRATCH_REG27)
+#define MDIO_848xx_CMD_HDLR_DATA2      (MDIO_848xx_TOP_CFG_SCRATCH_REG28)
+#define MDIO_848xx_CMD_HDLR_DATA3      (MDIO_848xx_TOP_CFG_SCRATCH_REG29)
+#define MDIO_848xx_CMD_HDLR_DATA4      (MDIO_848xx_TOP_CFG_SCRATCH_REG30)
+#define MDIO_848xx_CMD_HDLR_DATA5      (MDIO_848xx_TOP_CFG_SCRATCH_REG31)
 
-/* Mailbox command set used by 84833. */
-#define PHY84833_CMD_SET_PAIR_SWAP                     0x8001
-#define PHY84833_CMD_GET_EEE_MODE                      0x8008
-#define PHY84833_CMD_SET_EEE_MODE                      0x8009
-/* Mailbox status set used by 84833. */
+/* Mailbox command set used by 84833/84858 */
+#define PHY848xx_CMD_SET_PAIR_SWAP                     0x8001
+#define PHY848xx_CMD_GET_EEE_MODE                      0x8008
+#define PHY848xx_CMD_SET_EEE_MODE                      0x8009
+/* Mailbox status set used by 84833 only */
 #define PHY84833_STATUS_CMD_RECEIVED                   0x0001
 #define PHY84833_STATUS_CMD_IN_PROGRESS                        0x0002
 #define PHY84833_STATUS_CMD_COMPLETE_PASS              0x0004
@@ -7318,6 +7338,13 @@ Theotherbitsarereservedandshouldbezero*/
 #define PHY84833_STATUS_CMD_CLEAR_COMPLETE             0x0080
 #define PHY84833_STATUS_CMD_OPEN_OVERRIDE              0xa5a5
 
+/* Mailbox status set used by 84858 only */
+#define PHY84858_STATUS_CMD_RECEIVED                   0x0001
+#define PHY84858_STATUS_CMD_IN_PROGRESS                        0x0002
+#define PHY84858_STATUS_CMD_COMPLETE_PASS              0x0004
+#define PHY84858_STATUS_CMD_COMPLETE_ERROR             0x0008
+#define PHY84858_STATUS_CMD_SYSTEM_BUSY                        0xbbbb
+
 
 /* Warpcore clause 45 addressing */
 #define MDIO_WC_DEVAD                                  0x3
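
The scratch-register mailbox itself is unchanged; the names only lose their 84833 binding because the 84858 uses the same registers while reporting progress through its own status codes (the PHY84858_STATUS_* set above). The handler that drives it, bnx2x_848xx_cmd_hdlr(), lives in bnx2x_link.c and is not shown in this excerpt; a simplified sketch of the sequence it performs - load the data registers, post the command, poll the status, read back the results - might look like this (timeouts and the 84833/84858 status split are glossed over):

/* Illustrative sketch only; not the driver's bnx2x_848xx_cmd_hdlr() */
static int phy848xx_mbox_cmd_sketch(struct bnx2x *bp, struct bnx2x_phy *phy,
                                    u16 cmd, u16 *data, u8 nargs)
{
        u16 status = 0;
        int i;

        /* DATA1..DATA5 are consecutive scratch registers */
        for (i = 0; i < nargs; i++)
                bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
                                 MDIO_848xx_CMD_HDLR_DATA1 + i, data[i]);

        /* post the opcode, e.g. PHY848xx_CMD_SET_PAIR_SWAP */
        bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
                         MDIO_848xx_CMD_HDLR_COMMAND, cmd);

        /* poll until the PHY firmware reports completion */
        for (i = 0; i < 100; i++) {
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                                MDIO_848xx_CMD_HDLR_STATUS, &status);
                if (status == PHY84833_STATUS_CMD_COMPLETE_PASS)
                        break;
                usleep_range(1000, 2000);
        }
        if (status != PHY84833_STATUS_CMD_COMPLETE_PASS)
                return -EINVAL;

        /* results come back through the same data registers */
        for (i = 0; i < nargs; i++)
                bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
                                MDIO_848xx_CMD_HDLR_DATA1 + i, &data[i]);
        return 0;
}
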
index 4ad415ac8cfe4a56ffd00858d1f70d9f1ab01456..c9bd7f16018e7616b8cedf03c78c41dce058aa79 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sp.c: Broadcom Everest network driver.
+/* bnx2x_sp.c: Qlogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and Qlogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other Qlogic software provided under a
+ * license other than the GPL, without Qlogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -355,6 +357,23 @@ static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
 
        return vp->get(vp, 1);
 }
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+       struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+       struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+       if (!mp->get(mp, 1))
+               return false;
+
+       if (!vp->get(vp, 1)) {
+               mp->put(mp, 1);
+               return false;
+       }
+
+       return true;
+}
+
 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
 {
        struct bnx2x_credit_pool_obj *mp = o->macs_pool;
@@ -383,6 +402,22 @@ static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
        return vp->put(vp, 1);
 }
 
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+       struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+       struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+       if (!mp->put(mp, 1))
+               return false;
+
+       if (!vp->put(vp, 1)) {
+               mp->get(mp, 1);
+               return false;
+       }
+
+       return true;
+}
+
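
get_credit/put_credit are the hooks the generic vlan_mac object invokes when a classification entry is admitted or retired; the pair variant must draw one unit from the MAC pool and one from the VLAN pool together, hence the roll-back when the second reservation fails. On the consumer side the calls reduce to something like the fragment below; the real call sites are in the generic vlan_mac validation/removal paths outside this excerpt:

/* Simplified consumer-side fragment, not driver code */
if (!o->get_credit(o))                  /* ADD path: reserve MAC + VLAN */
        return -EINVAL;
/* ... queue and execute the classification ramrod ... */
if (!o->put_credit(o))                  /* DEL path: release both credits */
        return -EINVAL;
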
 /**
  * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
  *
@@ -636,6 +671,26 @@ static int bnx2x_check_vlan_add(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+                                   struct bnx2x_vlan_mac_obj *o,
+                                  union bnx2x_classification_ramrod_data *data)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+
+       DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+          data->vlan_mac.mac, data->vlan_mac.vlan);
+
+       list_for_each_entry(pos, &o->head, link)
+               if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+                   (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+                                 ETH_ALEN)) &&
+                   (data->vlan_mac.is_inner_mac ==
+                    pos->u.vlan_mac.is_inner_mac))
+                       return -EEXIST;
+
+       return 0;
+}
+
 /* check_del() callbacks */
 static struct bnx2x_vlan_mac_registry_elem *
        bnx2x_check_mac_del(struct bnx2x *bp,
@@ -670,6 +725,27 @@ static struct bnx2x_vlan_mac_registry_elem *
        return NULL;
 }
 
+static struct bnx2x_vlan_mac_registry_elem *
+       bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+                                struct bnx2x_vlan_mac_obj *o,
+                                union bnx2x_classification_ramrod_data *data)
+{
+       struct bnx2x_vlan_mac_registry_elem *pos;
+
+       DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+          data->vlan_mac.mac, data->vlan_mac.vlan);
+
+       list_for_each_entry(pos, &o->head, link)
+               if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+                   (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+                            ETH_ALEN)) &&
+                   (data->vlan_mac.is_inner_mac ==
+                    pos->u.vlan_mac.is_inner_mac))
+                       return pos;
+
+       return NULL;
+}
+
 /* check_move() callback */
 static bool bnx2x_check_move(struct bnx2x *bp,
                             struct bnx2x_vlan_mac_obj *src_o,
@@ -1036,6 +1112,96 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
                                        rule_cnt);
 }
 
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+                                     struct bnx2x_vlan_mac_obj *o,
+                                     struct bnx2x_exeq_elem *elem,
+                                     int rule_idx, int cam_offset)
+{
+       struct bnx2x_raw_obj *raw = &o->raw;
+       struct eth_classify_rules_ramrod_data *data =
+               (struct eth_classify_rules_ramrod_data *)(raw->rdata);
+       int rule_cnt = rule_idx + 1;
+       union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+       enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd;
+       bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+       u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+       u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+       u16 inner_mac;
+
+       /* Reset the ramrod data buffer for the first rule */
+       if (rule_idx == 0)
+               memset(data, 0, sizeof(*data));
+
+       /* Set a rule header */
+       bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+                                     &rule_entry->pair.header);
+
+       /* Set VLAN and MAC themselves */
+       rule_entry->pair.vlan = cpu_to_le16(vlan);
+       bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+                             &rule_entry->pair.mac_mid,
+                             &rule_entry->pair.mac_lsb, mac);
+       inner_mac = elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac;
+       rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+       /* MOVE: Add a rule that will add this MAC/VLAN to the target Queue */
+       if (cmd == BNX2X_VLAN_MAC_MOVE) {
+               struct bnx2x_vlan_mac_obj *target_obj;
+
+               rule_entry++;
+               rule_cnt++;
+
+               /* Setup ramrod data */
+               target_obj = elem->cmd_data.vlan_mac.target_obj;
+               bnx2x_vlan_mac_set_cmd_hdr_e2(bp, target_obj,
+                                             true, CLASSIFY_RULE_OPCODE_PAIR,
+                                             &rule_entry->pair.header);
+
+               /* Set a VLAN itself */
+               rule_entry->pair.vlan = cpu_to_le16(vlan);
+               bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+                                     &rule_entry->pair.mac_mid,
+                                     &rule_entry->pair.mac_lsb, mac);
+               rule_entry->pair.inner_mac = cpu_to_le16(inner_mac);
+       }
+
+       /* Set the ramrod data header */
+       bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+                                       rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h -
+ *
+ * @bp:                device handle
+ * @o:         bnx2x_vlan_mac_obj
+ * @elem:      bnx2x_exeq_elem
+ * @rule_idx:  rule_idx
+ * @cam_offset:        cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+                                      struct bnx2x_vlan_mac_obj *o,
+                                      struct bnx2x_exeq_elem *elem,
+                                      int rule_idx, int cam_offset)
+{
+       struct bnx2x_raw_obj *raw = &o->raw;
+       struct mac_configuration_cmd *config =
+               (struct mac_configuration_cmd *)(raw->rdata);
+       /* 57710 and 57711 do not support MOVE command,
+        * so it's either ADD or DEL
+        */
+       bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+               true : false;
+
+       /* Reset the ramrod data buffer */
+       memset(config, 0, sizeof(*config));
+
+       bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+                                    cam_offset, add,
+                                    elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+                                    elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+                                    ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
  *
@@ -1135,6 +1301,25 @@ static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
        return NULL;
 }
 
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+       struct bnx2x_exe_queue_obj *o,
+       struct bnx2x_exeq_elem *elem)
+{
+       struct bnx2x_exeq_elem *pos;
+       struct bnx2x_vlan_mac_ramrod_data *data =
+               &elem->cmd_data.vlan_mac.u.vlan_mac;
+
+       /* Check pending for execution commands */
+       list_for_each_entry(pos, &o->exe_queue, link)
+               if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+                           sizeof(*data)) &&
+                   (pos->cmd_data.vlan_mac.cmd ==
+                    elem->cmd_data.vlan_mac.cmd))
+                       return pos;
+
+       return NULL;
+}
+
 /**
  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
  *
@@ -2042,6 +2227,68 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
        }
 }
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+                            struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+                            u8 cl_id, u32 cid, u8 func_id, void *rdata,
+                            dma_addr_t rdata_mapping, int state,
+                            unsigned long *pstate, bnx2x_obj_type type,
+                            struct bnx2x_credit_pool_obj *macs_pool,
+                            struct bnx2x_credit_pool_obj *vlans_pool)
+{
+       union bnx2x_qable_obj *qable_obj =
+               (union bnx2x_qable_obj *)vlan_mac_obj;
+
+       bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+                                  rdata_mapping, state, pstate, type,
+                                  macs_pool, vlans_pool);
+
+       /* CAM pool handling */
+       vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+       vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+       /* CAM offset is relevant for 57710 and 57711 chips only which have a
+        * single CAM for both MACs and VLAN-MAC pairs. So the offset
+        * will be taken from MACs' pool object only.
+        */
+       vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+       vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+       if (CHIP_IS_E1(bp)) {
+               BNX2X_ERR("Do not support chips others than E2\n");
+               BUG();
+       } else if (CHIP_IS_E1H(bp)) {
+               vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+               vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+               vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+               vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+               vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+               /* Exe Queue */
+               bnx2x_exe_queue_init(bp,
+                                    &vlan_mac_obj->exe_queue, 1, qable_obj,
+                                    bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
+                                    bnx2x_optimize_vlan_mac,
+                                    bnx2x_execute_vlan_mac,
+                                    bnx2x_exeq_get_vlan_mac);
+       } else {
+               vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+               vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+               vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+               vlan_mac_obj->check_move        = bnx2x_check_move;
+               vlan_mac_obj->ramrod_cmd        =
+                       RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+               /* Exe Queue */
+               bnx2x_exe_queue_init(bp,
+                                    &vlan_mac_obj->exe_queue,
+                                    CLASSIFY_RULES_COUNT,
+                                    qable_obj, bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
+                                    bnx2x_optimize_vlan_mac,
+                                    bnx2x_execute_vlan_mac,
+                                    bnx2x_exeq_get_vlan_mac);
+       }
+}
 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
                        struct tstorm_eth_mac_filter_config *mac_filters,
@@ -3854,8 +4101,8 @@ static bool bnx2x_credit_pool_get_entry_always_true(
  * If credit is negative pool operations will always succeed (unlimited pool).
  *
  */
-static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
-                                         int base, int credit)
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+                           int base, int credit)
 {
        /* Zero the object first */
        memset(p, 0, sizeof(*p));
@@ -3934,9 +4181,9 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
                /* CAM credit is equaly divided between all active functions
                 * on the PATH.
                 */
-               if ((func_num > 0)) {
+               if (func_num > 0) {
                        if (!CHIP_REV_IS_SLOW(bp))
-                               cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+                               cam_sz = PF_MAC_CREDIT_E2(bp, func_num);
                        else
                                cam_sz = BNX2X_CAM_SIZE_EMUL;
 
@@ -3966,8 +4213,9 @@ void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                 * on the PATH.
                 */
                if (func_num > 0) {
-                       int credit = MAX_VLAN_CREDIT_E2 / func_num;
-                       bnx2x_init_credit_pool(p, func_id * credit, credit);
+                       int credit = PF_VLAN_CREDIT_E2(bp, func_num);
+
+                       bnx2x_init_credit_pool(p, -1/*unused for E2*/, credit);
                } else
                        /* this should never happen! Block VLAN operations. */
                        bnx2x_init_credit_pool(p, 0, 0);
@@ -4060,8 +4308,14 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
        if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
                caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
 
-       if (test_bit(BNX2X_RSS_GRE_INNER_HDRS, &p->rss_flags))
-               caps |= ETH_RSS_UPDATE_RAMROD_DATA_GRE_INNER_HDRS_CAPABILITY;
+       if (test_bit(BNX2X_RSS_IPV4_VXLAN, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_VXLAN_CAPABILITY;
+
+       if (test_bit(BNX2X_RSS_IPV6_VXLAN, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_VXLAN_CAPABILITY;
+
+       if (test_bit(BNX2X_RSS_TUNN_INNER_HDRS, &p->rss_flags))
+               caps |= ETH_RSS_UPDATE_RAMROD_DATA_TUNN_INNER_HDRS_CAPABILITY;
 
        /* RSS keys */
        if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
@@ -5669,10 +5923,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id          = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
-       rdata->tunnel_mode      = start_params->tunnel_mode;
-       rdata->gre_tunnel_type  = start_params->gre_tunnel_type;
-       rdata->inner_gre_rss_en = start_params->inner_gre_rss_en;
-       rdata->vxlan_dst_port   = cpu_to_le16(4789);
+
+       rdata->vxlan_dst_port   = cpu_to_le16(start_params->vxlan_dst_port);
+       rdata->geneve_dst_port  = cpu_to_le16(start_params->geneve_dst_port);
+       rdata->inner_clss_l2gre = start_params->inner_clss_l2gre;
+       rdata->inner_clss_l2geneve = start_params->inner_clss_l2geneve;
+       rdata->inner_clss_vxlan = start_params->inner_clss_vxlan;
+       rdata->inner_rss        = start_params->inner_rss;
+
        rdata->sd_accept_mf_clss_fail = start_params->class_fail;
        if (start_params->class_fail_ethtype) {
                rdata->sd_accept_mf_clss_fail_match_ethtype = 1;
@@ -5690,6 +5948,14 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
                        cpu_to_le16(0x8100);
 
        rdata->no_added_tags = start_params->no_added_tags;
+
+       rdata->c2s_pri_tt_valid = start_params->c2s_pri_valid;
+       if (rdata->c2s_pri_tt_valid) {
+               memcpy(rdata->c2s_pri_trans_table.val,
+                      start_params->c2s_pri,
+                      MAX_VLAN_PRIORITIES);
+               rdata->c2s_pri_default = start_params->c2s_pri_default;
+       }
        /* No need for an explicit memory barrier here as long we would
         * need to ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
@@ -5750,15 +6016,22 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
        if (test_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
                     &switch_update_params->changes)) {
                rdata->update_tunn_cfg_flg = 1;
-               if (test_bit(BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+                            &switch_update_params->changes))
+                       rdata->inner_clss_l2gre = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+                            &switch_update_params->changes))
+                       rdata->inner_clss_vxlan = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
                             &switch_update_params->changes))
-                       rdata->tunn_clss_en = 1;
-               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+                       rdata->inner_clss_l2geneve = 1;
+               if (test_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
                             &switch_update_params->changes))
-                       rdata->inner_gre_rss_en = 1;
-               rdata->tunnel_mode = switch_update_params->tunnel_mode;
-               rdata->gre_tunnel_type = switch_update_params->gre_tunnel_type;
-               rdata->vxlan_dst_port = cpu_to_le16(4789);
+                       rdata->inner_rss = 1;
+               rdata->vxlan_dst_port =
+                       cpu_to_le16(switch_update_params->vxlan_dst_port);
+               rdata->geneve_dst_port =
+                       cpu_to_le16(switch_update_params->geneve_dst_port);
        }
 
        rdata->echo = SWITCH_UPDATE;
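[Editor's note: callers now carry the UDP destination ports themselves instead of relying on the hard-wired 4789 the old code wrote. A hedged sketch of a switch-update request using the IANA-assigned ports; the function is illustrative, while bnx2x_func_state_params and the flag names come from this patch.]

    /* Sketch: reprogram both tunnel UDP ports in one switch-update. */
    static void sketch_set_tunnel_ports(struct bnx2x_func_state_params *fsp)
    {
            struct bnx2x_func_switch_update_params *sw =
                    &fsp->params.switch_update;

            __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &sw->changes);
            sw->vxlan_dst_port  = 4789;     /* IANA VXLAN port */
            sw->geneve_dst_port = 6081;     /* IANA Geneve port */
    }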
@@ -5885,6 +6158,8 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                rdata->traffic_type_to_priority_cos[i] =
                        tx_start_params->traffic_type_to_priority_cos[i];
 
+       for (i = 0; i < MAX_TRAFFIC_TYPES; i++)
+               rdata->dcb_outer_pri[i] = tx_start_params->dcb_outer_pri[i];
        /* No need for an explicit memory barrier here as long as we
         * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
index 86baecb7c60c41a3cf096ad884efe674fb9221b3..4048fc594cce53f183d7c4d840dd4a0b38c80404 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sp.h: Broadcom Everest network driver.
+/* bnx2x_sp.h: QLogic Everest network driver.
  *
- * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * at http://www.gnu.org/licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -711,7 +713,10 @@ enum {
        BNX2X_RSS_IPV6,
        BNX2X_RSS_IPV6_TCP,
        BNX2X_RSS_IPV6_UDP,
-       BNX2X_RSS_GRE_INNER_HDRS,
+
+       BNX2X_RSS_IPV4_VXLAN,
+       BNX2X_RSS_IPV6_VXLAN,
+       BNX2X_RSS_TUNN_INNER_HDRS,
 };
 
 struct bnx2x_config_rss_params {
@@ -1105,8 +1110,10 @@ enum {
        BNX2X_F_UPDATE_VLAN_FORCE_PRIO_CHNG,
        BNX2X_F_UPDATE_VLAN_FORCE_PRIO_FLAG,
        BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
-       BNX2X_F_UPDATE_TUNNEL_CLSS_EN,
-       BNX2X_F_UPDATE_TUNNEL_INNER_GRE_RSS_EN,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GRE,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_VXLAN,
+       BNX2X_F_UPDATE_TUNNEL_INNER_CLSS_L2GENEVE,
+       BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
 };
 
 /* Allowed Function states */
@@ -1171,19 +1178,23 @@ struct bnx2x_func_start_params {
        /* Function cos mode */
        u8 network_cos_mode;
 
-       /* TUNN_MODE_NONE/TUNN_MODE_VXLAN/TUNN_MODE_GRE */
-       u8 tunnel_mode;
+       /* UDP dest port for VXLAN */
+       u16 vxlan_dst_port;
 
-       /* tunneling classification enablement */
-       u8 tunn_clss_en;
+       /* UDP dest port for Geneve */
+       u16 geneve_dst_port;
 
-       /* NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
-       u8 gre_tunnel_type;
+       /* Enable inner Rx classifications for L2GRE packets */
+       u8 inner_clss_l2gre;
 
-       /* Enables Inner GRE RSS on the function, depends on the client RSS
-        * capailities
-        */
-       u8 inner_gre_rss_en;
+       /* Enable inner Rx classifications for L2-Geneve packets */
+       u8 inner_clss_l2geneve;
+
+       /* Enable inner Rx classification for vxlan packets */
+       u8 inner_clss_vxlan;
+
+       /* Enable RSS according to inner header */
+       u8 inner_rss;
 
        /* Allows accepting of packets failing MF classification, possibly
         * only matching a given ethertype
@@ -1200,6 +1211,11 @@ struct bnx2x_func_start_params {
 
        /* Prevent inner vlans from being added by FW */
        u8 no_added_tags;
+
+       /* Inner-to-Outer vlan priority mapping */
+       u8 c2s_pri[MAX_VLAN_PRIORITIES];
+       u8 c2s_pri_default;
+       u8 c2s_pri_valid;
 };
 
 struct bnx2x_func_switch_update_params {
@@ -1207,8 +1223,8 @@ struct bnx2x_func_switch_update_params {
        u16 vlan;
        u16 vlan_eth_type;
        u8 vlan_force_prio;
-       u8 tunnel_mode;
-       u8 gre_tunnel_type;
+       u16 vxlan_dst_port;
+       u16 geneve_dst_port;
 };
 
 struct bnx2x_func_afex_update_params {
@@ -1229,6 +1245,7 @@ struct bnx2x_func_tx_start_params {
        u8 dcb_enabled;
        u8 dcb_version;
        u8 dont_add_pri_0_en;
+       u8 dcb_outer_pri[MAX_TRAFFIC_TYPES];
 };
 
 struct bnx2x_func_set_timesync_params {
@@ -1396,6 +1413,14 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
                         unsigned long *pstate, bnx2x_obj_type type,
                         struct bnx2x_credit_pool_obj *vlans_pool);
 
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+                            struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+                            u8 cl_id, u32 cid, u8 func_id, void *rdata,
+                            dma_addr_t rdata_mapping, int state,
+                            unsigned long *pstate, bnx2x_obj_type type,
+                            struct bnx2x_credit_pool_obj *macs_pool,
+                            struct bnx2x_credit_pool_obj *vlans_pool);
+
 int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
                                        struct bnx2x_vlan_mac_obj *o);
 void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
@@ -1466,6 +1491,8 @@ void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
                                 u8 func_num);
+void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+                           int base, int credit);
 
 /****************** RSS CONFIGURATION ****************/
 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
@@ -1493,4 +1520,12 @@ int bnx2x_config_rss(struct bnx2x *bp,
 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
                             u8 *ind_table);
 
+#define PF_MAC_CREDIT_E2(bp, func_num)                                 \
+       ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \
+        func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT)
+
+#define PF_VLAN_CREDIT_E2(bp, func_num)                                         \
+       ((MAX_VLAN_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \
+        func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT)
+
 #endif /* BNX2X_SP_VERBS */
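[Editor's note: the two credit macros split a path-wide CAM budget: every possible VF on the path reserves its fixed share first, the PFs divide the remainder evenly, and each PF adds back the reservations of the VFs it actually owns. A worked example; the real MAX_MAC_CREDIT_E2 / MAX_VLAN_CREDIT_E2 values come from the driver headers, and the numbers below are purely illustrative.]

    /* Illustrative only: a pool of 272 credits, 64 possible VFs per path
     * each reserving VF_MAC_CREDIT_CNT == 1, four PFs sharing the path,
     * and 16 VFs instantiated under this PF:
     *
     *   (272 - 64 * 1) / 4 + 16 * 1  =  52 + 16  =  68 credits for the PF
     */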
index f67348d169667376d47f082fc2da1eaf58843f1e..ec82831f507118ef2cebc58f32262b13c55e7345 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.c: Broadcom Everest network driver.
+/* bnx2x_sriov.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -195,14 +197,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
        setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
        setup_p->gen_params.fp_hsi = vf->fp_hsi;
 
-       /* Setup-op pause params:
-        * Nothing to do, the pause thresholds are set by default to 0 which
-        * effectively turns off the feature for this queue. We don't want
-        * one queue (VF) to interfering with another queue (another VF)
-        */
-       if (vf->cfg_flags & VF_CFG_FW_FC)
-               BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
-                         vf->abs_vfid);
        /* Setup-op flags:
         * collect statistics, zero statistics, local-switching, security,
         * OV for Flex10, RSS and MCAST for leading
@@ -358,22 +352,24 @@ static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
 }
 
 static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
-                                  int qid, bool drv_only, bool mac)
+                                  int qid, bool drv_only, int type)
 {
        struct bnx2x_vlan_mac_ramrod_params ramrod;
        int rc;
 
        DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
-          mac ? "MACs" : "VLANs");
+                         (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+                         (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
 
        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-       if (mac) {
+       if (type == BNX2X_VF_FILTER_VLAN_MAC) {
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+       } else if (type == BNX2X_VF_FILTER_MAC) {
                set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
        } else {
-               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-                       &ramrod.user_req.vlan_mac_flags);
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
        }
        ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;
@@ -391,14 +387,11 @@ static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                             &ramrod.ramrod_flags);
        if (rc) {
                BNX2X_ERR("Failed to delete all %s\n",
-                         mac ? "MACs" : "VLANs");
+                         (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
+                         (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
                return rc;
        }
 
-       /* Clear the vlan counters */
-       if (!mac)
-               atomic_set(&bnx2x_vfq(vf, qid, vlan_count), 0);
-
        return 0;
 }
 
@@ -412,13 +405,17 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 
        DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
           vf->abs_vfid, filter->add ? "Adding" : "Deleting",
-          filter->type == BNX2X_VF_FILTER_MAC ? "MAC" : "VLAN");
+          (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
+          (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");
 
        /* Prepare ramrod params */
        memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
-       if (filter->type == BNX2X_VF_FILTER_VLAN) {
-               set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-                       &ramrod.user_req.vlan_mac_flags);
+       if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
+               ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
+               ramrod.user_req.u.vlan.vlan = filter->vid;
+               memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
+               set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
+       } else if (filter->type == BNX2X_VF_FILTER_VLAN) {
                ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
                ramrod.user_req.u.vlan.vlan = filter->vid;
        } else {
@@ -429,16 +426,6 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
                                            BNX2X_VLAN_MAC_DEL;
 
-       /* Verify there are available vlan credits */
-       if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
-           (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
-            vf_vlan_rules_cnt(vf))) {
-               BNX2X_ERR("No credits for vlan [%d >= %d]\n",
-                         atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
-                         vf_vlan_rules_cnt(vf));
-               return -ENOMEM;
-       }
-
        set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
        if (drv_only)
                set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
@@ -450,16 +437,13 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        if (rc && rc != -EEXIST) {
                BNX2X_ERR("Failed to %s %s\n",
                          filter->add ? "add" : "delete",
-                         filter->type == BNX2X_VF_FILTER_MAC ? "MAC" :
-                                                               "VLAN");
+                         (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
+                               "VLAN-MAC" :
+                         (filter->type == BNX2X_VF_FILTER_MAC) ?
+                               "MAC" : "VLAN");
                return rc;
        }
 
-       /* Update the vlan counters */
-       if (filter->type == BNX2X_VF_FILTER_VLAN)
-               bnx2x_vf_vlan_credit(bp, ramrod.vlan_mac_obj,
-                                    &bnx2x_vfq(vf, qid, vlan_count));
-
        return 0;
 }
 
@@ -511,21 +495,7 @@ int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
        if (rc)
                goto op_err;
 
-       /* Configure vlan0 for leading queue */
-       if (!qid) {
-               struct bnx2x_vf_mac_vlan_filter filter;
-
-               memset(&filter, 0, sizeof(struct bnx2x_vf_mac_vlan_filter));
-               filter.type = BNX2X_VF_FILTER_VLAN;
-               filter.add = true;
-               filter.vid = 0;
-               rc = bnx2x_vf_mac_vlan_config(bp, vf, qid, &filter, false);
-               if (rc)
-                       goto op_err;
-       }
-
        /* Schedule the configuration of any pending vlan filters */
-       vf->cfg_flags |= VF_CFG_VLAN;
        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
                               BNX2X_MSG_IOV);
        return 0;
@@ -544,10 +514,16 @@ static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
        /* If needed, clean the filtering data base */
        if ((qid == LEADING_IDX) &&
            bnx2x_validate_vf_sp_objs(bp, vf, false)) {
-               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, false);
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_VLAN_MAC);
+               if (rc)
+                       goto op_err;
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_VLAN);
                if (rc)
                        goto op_err;
-               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true, true);
+               rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
+                                            BNX2X_VF_FILTER_MAC);
                if (rc)
                        goto op_err;
        }
@@ -680,11 +656,18 @@ int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
                /* Remove filtering if feasible */
                if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-                                                    false, false);
+                                                    false,
+                                                    BNX2X_VF_FILTER_VLAN_MAC);
+                       if (rc)
+                               goto op_err;
+                       rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
+                                                    false,
+                                                    BNX2X_VF_FILTER_VLAN);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
-                                                    false, true);
+                                                    false,
+                                                    BNX2X_VF_FILTER_MAC);
                        if (rc)
                                goto op_err;
                        rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
@@ -765,8 +748,6 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
        val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
        val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
-       if (vf->cfg_flags & VF_CFG_INT_SIMD)
-               val |= IGU_VF_CONF_SINGLE_ISR_EN;
        val &= ~IGU_VF_CONF_PARENT_MASK;
        val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
        REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
@@ -845,29 +826,6 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
        return 0;
 }
 
-static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
-                                         struct bnx2x_virtf *vf,
-                                         int new)
-{
-       int num = vf_vlan_rules_cnt(vf);
-       int diff = new - num;
-       bool rc = true;
-
-       DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
-          vf->abs_vfid, new, num);
-
-       if (diff > 0)
-               rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
-       else if (diff < 0)
-               rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
-
-       if (rc)
-               vf_vlan_rules_cnt(vf) = new;
-       else
-               DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
-                  vf->abs_vfid);
-}
-
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */
@@ -875,21 +833,13 @@ static void
 bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
        struct vf_pf_resc_request *resc = &vf->alloc_resc;
-       u16 vlan_count = 0;
 
        /* will be set only during VF-ACQUIRE */
        resc->num_rxqs = 0;
        resc->num_txqs = 0;
 
-       /* no credit calculations for macs (just yet) */
-       resc->num_mac_filters = 1;
-
-       /* divvy up vlan rules */
-       bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
-       vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
-       vlan_count = 1 << ilog2(vlan_count);
-       bnx2x_iov_re_set_vlan_filters(bp, vf,
-                                     vlan_count / BNX2X_NR_VIRTFN(bp));
+       resc->num_mac_filters = VF_MAC_CREDIT_CNT;
+       resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;
 
        /* no real limitation */
        resc->num_mc_filters = 0;
@@ -1338,6 +1288,9 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
 
        mutex_init(&bp->vfdb->bulletin_mutex);
 
+       if (SHMEM2_HAS(bp, sriov_switch_mode))
+               SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);
+
        return 0;
 failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1620,6 +1573,11 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
                vf->filter_state = 0;
                vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);
 
+               bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
+                                      vf_vlan_rules_cnt(vf));
+               bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
+                                      vf_mac_rules_cnt(vf));
+
                /*  init mcast object - This object will be re-initialized
                 *  during VF-ACQUIRE with the proper cl_id and cid.
                 *  It needs to be initialized here so that it can be safely
@@ -2032,12 +1990,11 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
        u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
        u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 
-       /* Save a vlan filter for the Hypervisor */
        return ((req_resc->num_rxqs <= rxq_cnt) &&
                (req_resc->num_txqs <= txq_cnt) &&
                (req_resc->num_sbs <= vf_sb_count(vf))   &&
                (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-               (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
+               (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
 }
 
 /* CORE VF API */
@@ -2091,16 +2048,12 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
        vf_sb_count(vf) = resc->num_sbs;
        vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
        vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
-       if (resc->num_mac_filters)
-               vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-       /* Add an additional vlan filter credit for the hypervisor */
-       bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);
 
        DP(BNX2X_MSG_IOV,
           "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
           vf_sb_count(vf), vf_rxq_count(vf),
           vf_txq_count(vf), vf_mac_rules_cnt(vf),
-          vf_vlan_rules_visible_cnt(vf));
+          vf_vlan_rules_cnt(vf));
 
        /* Initialize the queues */
        if (!vf->vfqs) {
@@ -2133,7 +2086,6 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 {
        struct bnx2x_func_init_params func_init = {0};
-       u16 flags = 0;
        int i;
 
        /* the sb resources are initialized at this point, do the
@@ -2160,23 +2112,9 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
        /* reset IGU VF statistics: MSIX */
        REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4 , 0);
 
-       /* vf init */
-       if (vf->cfg_flags & VF_CFG_STATS)
-               flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);
-
-       if (vf->cfg_flags & VF_CFG_TPA)
-               flags |= FUNC_FLG_TPA;
-
-       if (is_vf_multi(vf))
-               flags |= FUNC_FLG_RSS;
-
        /* function setup */
-       func_init.func_flgs = flags;
        func_init.pf_id = BP_FUNC(bp);
        func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
-       func_init.fw_stat_map = vf->fw_stat_map;
-       func_init.spq_map = vf->spq_map;
-       func_init.spq_prod = 0;
        bnx2x_func_init(bp, &func_init);
 
        /* Enable the vf */
@@ -2589,8 +2527,8 @@ void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
 
        DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
        for_each_vf(bp, vfidx) {
-       bulletin = BP_VF_BULLETIN(bp, vfidx);
-               if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
+               bulletin = BP_VF_BULLETIN(bp, vfidx);
+               if (bulletin->valid_bitmap & VLAN_VALID)
                        bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
        }
 }
@@ -2808,20 +2746,58 @@ out:
        return rc;
 }
 
-int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf, bool accept)
+{
+       struct bnx2x_rx_mode_ramrod_params rx_ramrod;
+       unsigned long accept_flags;
+
+       /* need to remove/add the VF's accept_any_vlan bit */
+       accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+       if (accept)
+               set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+       else
+               clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+       bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+                             accept_flags);
+       bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+       bnx2x_config_rx_mode(bp, &rx_ramrod);
+}
+
+static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                   u16 vlan, bool add)
 {
-       struct bnx2x_queue_state_params q_params = {NULL};
        struct bnx2x_vlan_mac_ramrod_params ramrod_param;
-       struct bnx2x_queue_update_params *update_params;
+       unsigned long ramrod_flags = 0;
+       int rc = 0;
+
+       /* configure the new vlan to device */
+       memset(&ramrod_param, 0, sizeof(ramrod_param));
+       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+       ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+       ramrod_param.ramrod_flags = ramrod_flags;
+       ramrod_param.user_req.u.vlan.vlan = vlan;
+       ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
+                                       : BNX2X_VLAN_MAC_DEL;
+       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+       if (rc) {
+               BNX2X_ERR("failed to configure vlan\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
+{
        struct pf_vf_bulletin_content *bulletin = NULL;
-       struct bnx2x_rx_mode_ramrod_params rx_ramrod;
        struct bnx2x *bp = netdev_priv(dev);
        struct bnx2x_vlan_mac_obj *vlan_obj;
        unsigned long vlan_mac_flags = 0;
        unsigned long ramrod_flags = 0;
        struct bnx2x_virtf *vf = NULL;
-       unsigned long accept_flags;
-       int rc;
+       int i, rc;
 
        if (vlan > 4095) {
                BNX2X_ERR("illegal vlan value %d\n", vlan);
@@ -2850,6 +2826,10 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
        bulletin->vlan = vlan;
 
+       /* Post update on VF's bulletin board */
+       rc = bnx2x_post_vf_bulletin(bp, vfidx);
+       if (rc)
+               BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
        mutex_unlock(&bp->vfdb->bulletin_mutex);
 
        /* is vf initialized and queue set up? */
@@ -2876,84 +2856,76 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
                goto out;
        }
 
-       /* need to remove/add the VF's accept_any_vlan bit */
-       accept_flags = bnx2x_leading_vfq(vf, accept_flags);
-       if (vlan)
-               clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-       else
-               set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
-
-       bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
-                             accept_flags);
-       bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
-       bnx2x_config_rx_mode(bp, &rx_ramrod);
+       /* clear accept_any_vlan when HV forces vlan, otherwise
+        * according to VF capabilities
+        */
+       if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
+               bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);
 
-       /* configure the new vlan to device */
-       memset(&ramrod_param, 0, sizeof(ramrod_param));
-       __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
-       ramrod_param.vlan_mac_obj = vlan_obj;
-       ramrod_param.ramrod_flags = ramrod_flags;
-       set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
-               &ramrod_param.user_req.vlan_mac_flags);
-       ramrod_param.user_req.u.vlan.vlan = vlan;
-       ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
-       rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
-       if (rc) {
-               BNX2X_ERR("failed to configure vlan\n");
-               rc =  -EINVAL;
+       rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
+       if (rc)
                goto out;
-       }
 
-       /* send queue update ramrod to configure default vlan and silent
-        * vlan removal
+       /* send queue update ramrods to configure default vlan and
+        * silent vlan removal
         */
-       __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
-       q_params.cmd = BNX2X_Q_CMD_UPDATE;
-       q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
-       update_params = &q_params.params.update;
-       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
-                 &update_params->update_flags);
-       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
-                 &update_params->update_flags);
-       if (vlan == 0) {
-               /* if vlan is 0 then we want to leave the VF traffic
-                * untagged, and leave the incoming traffic untouched
-                * (i.e. do not remove any vlan tags).
-                */
-               __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
-                           &update_params->update_flags);
-               __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
-                           &update_params->update_flags);
-       } else {
-               /* configure default vlan to vf queue and set silent
-                * vlan removal (the vf remains unaware of this vlan).
-                */
-               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+       for_each_vfq(vf, i) {
+               struct bnx2x_queue_state_params q_params = {NULL};
+               struct bnx2x_queue_update_params *update_params;
+
+               q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);
+
+               /* validate the Q is UP */
+               if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
+                   BNX2X_Q_LOGICAL_STATE_ACTIVE)
+                       continue;
+
+               __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+               q_params.cmd = BNX2X_Q_CMD_UPDATE;
+               update_params = &q_params.params.update;
+               __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
                          &update_params->update_flags);
-               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+               __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
                          &update_params->update_flags);
-               update_params->def_vlan = vlan;
-               update_params->silent_removal_value =
-                       vlan & VLAN_VID_MASK;
-               update_params->silent_removal_mask = VLAN_VID_MASK;
-       }
+               if (vlan == 0) {
+                       /* if vlan is 0 then we want to leave the VF traffic
+                        * untagged, and leave the incoming traffic untouched
+                        * (i.e. do not remove any vlan tags).
+                        */
+                       __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                                   &update_params->update_flags);
+                       __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                                   &update_params->update_flags);
+               } else {
+                       /* configure default vlan to vf queue and set silent
+                        * vlan removal (the vf remains unaware of this vlan).
+                        */
+                       __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+                                 &update_params->update_flags);
+                       __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+                                 &update_params->update_flags);
+                       update_params->def_vlan = vlan;
+                       update_params->silent_removal_value =
+                               vlan & VLAN_VID_MASK;
+                       update_params->silent_removal_mask = VLAN_VID_MASK;
+               }
 
-       /* Update the Queue state */
-       rc = bnx2x_queue_state_change(bp, &q_params);
-       if (rc) {
-               BNX2X_ERR("Failed to configure default VLAN\n");
-               goto out;
+               /* Update the Queue state */
+               rc = bnx2x_queue_state_change(bp, &q_params);
+               if (rc) {
+                       BNX2X_ERR("Failed to configure default VLAN queue %d\n",
+                                 i);
+                       goto out;
+               }
        }
-
-
-       /* clear the flag indicating that this VF needs its vlan
-        * (will only be set if the HV configured the Vlan before vf was
-        * up and we were called because the VF came up later
-        */
 out:
-       vf->cfg_flags &= ~VF_CFG_VLAN;
        bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
 
+       if (!rc)
+               DP(BNX2X_MSG_IOV,
+                  "updated VF[%d] vlan configuration (vlan = %d)\n",
+                  vfidx, vlan);
+
        return rc;
 }
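[Editor's note: this reworked path is what the .ndo_set_vf_vlan hook, e.g. `ip link set <dev> vf <n> vlan <vid>`, ultimately lands in; the per-queue loop replays the default-VLAN/silent-removal update on every active queue instead of only the leading one. The acceptance logic above can be summarized as a small table; a restatement, not driver code.]

    /*
     * HV forces a vlan?   VF has VF_CFG_VLAN_FILTER?   accept_any_vlan
     * yes                 either                       cleared
     * no                  no                           set
     * no                  yes                          left to the VF's own
     *                                                  rx-mode request
     */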
 
index 66ee62a0401a86c9e44db1afdb9ebac0b7ebf49e..670a581ffabc7ee64b01e15170fac9bd194e5186 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_sriov.h: Broadcom Everest network driver.
+/* bnx2x_sriov.h: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -75,7 +77,10 @@ struct bnx2x_vf_queue {
 
        /* VLANs object */
        struct bnx2x_vlan_mac_obj       vlan_obj;
-       atomic_t vlan_count;            /* 0 means vlan-0 is set  ~ untagged */
+
+       /* VLAN-MACs object */
+       struct bnx2x_vlan_mac_obj       vlan_mac_obj;
+
        unsigned long accept_flags;     /* last accept flags configured */
 
        /* Queue Slow-path State object */
@@ -103,8 +108,10 @@ struct bnx2x_virtf;
 
 struct bnx2x_vf_mac_vlan_filter {
        int type;
-#define BNX2X_VF_FILTER_MAC    1
-#define BNX2X_VF_FILTER_VLAN   2
+#define BNX2X_VF_FILTER_MAC    BIT(0)
+#define BNX2X_VF_FILTER_VLAN   BIT(1)
+#define BNX2X_VF_FILTER_VLAN_MAC \
+       (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
 
        bool add;
        u8 *mac;
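[Editor's note: turning the filter types into individual bits lets one entry describe a paired classification. A hedged sketch of how the three cases are told apart, mirroring the tests added in bnx2x_sriov.c above.]

    /* Sketch: dispatch on the (now bitmask) filter type. */
    if (filter->type == BNX2X_VF_FILTER_VLAN_MAC)
            /* classify on the (vid, mac) pair */;
    else if (filter->type == BNX2X_VF_FILTER_MAC)
            /* MAC-only entry */;
    else
            /* VLAN-only entry */;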
@@ -119,14 +126,9 @@ struct bnx2x_vf_mac_vlan_filters {
 /* vf context */
 struct bnx2x_virtf {
        u16 cfg_flags;
-#define VF_CFG_STATS           0x0001
-#define VF_CFG_FW_FC           0x0002
-#define VF_CFG_TPA             0x0004
-#define VF_CFG_INT_SIMD                0x0008
-#define VF_CACHE_LINE          0x0010
-#define VF_CFG_VLAN            0x0020
-#define VF_CFG_STATS_COALESCE  0x0040
-#define VF_CFG_EXT_BULLETIN    0x0080
+#define VF_CFG_STATS_COALESCE  0x1
+#define VF_CFG_EXT_BULLETIN    0x2
+#define VF_CFG_VLAN_FILTER     0x4
        u8 link_cfg;            /* IFLA_VF_LINK_STATE_AUTO
                                 * IFLA_VF_LINK_STATE_ENABLE
                                 * IFLA_VF_LINK_STATE_DISABLE
@@ -140,9 +142,8 @@ struct bnx2x_virtf {
        bool flr_clnup_stage;   /* true during flr cleanup */
 
        /* dma */
-       dma_addr_t fw_stat_map;         /* valid iff VF_CFG_STATS */
+       dma_addr_t fw_stat_map;
        u16 stats_stride;
-       dma_addr_t spq_map;
        dma_addr_t bulletin_map;
 
        /* Allocated resources counters. Before the VF is acquired, the
@@ -163,8 +164,6 @@ struct bnx2x_virtf {
 #define vf_mac_rules_cnt(vf)           ((vf)->alloc_resc.num_mac_filters)
 #define vf_vlan_rules_cnt(vf)          ((vf)->alloc_resc.num_vlan_filters)
 #define vf_mc_rules_cnt(vf)            ((vf)->alloc_resc.num_mc_filters)
-       /* Hide a single vlan filter credit for the hypervisor */
-#define vf_vlan_rules_visible_cnt(vf)  (vf_vlan_rules_cnt(vf) - 1)
 
        u8 sb_count;    /* actual number of SBs */
        u8 igu_base_id; /* base igu status block id */
@@ -207,6 +206,9 @@ struct bnx2x_virtf {
        enum channel_tlvs               op_current;
 
        u8 fp_hsi;
+
+       struct bnx2x_credit_pool_obj    vf_vlans_pool;
+       struct bnx2x_credit_pool_obj    vf_macs_pool;
 };
 
 #define BNX2X_NR_VIRTFN(bp)    ((bp)->vfdb->sriov.nr_virtfn)
@@ -230,6 +232,12 @@ struct bnx2x_virtf {
 #define FW_VF_HANDLE(abs_vfid) \
        (abs_vfid + FW_PF_MAX_HANDLE)
 
+#define GET_NUM_VFS_PER_PATH(bp)       64 /* use max possible value */
+#define GET_NUM_VFS_PER_PF(bp)         ((bp)->vfdb ? (bp)->vfdb->sriov.total \
+                                                   : 0)
+#define VF_MAC_CREDIT_CNT              1
+#define VF_VLAN_CREDIT_CNT             2 /* VLAN0 + 'real' VLAN */
+
 /* locking and unlocking the channel mutex */
 void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
                              enum channel_tlvs tlv);
@@ -273,6 +281,10 @@ struct bnx2x_vf_sp {
                struct eth_classify_rules_ramrod_data   e2;
        } vlan_rdata;
 
+       union {
+               struct eth_classify_rules_ramrod_data   e2;
+       } vlan_mac_rdata;
+
        union {
                struct eth_filter_rules_ramrod_data     e2;
        } rx_mode_rdata;
@@ -536,8 +548,14 @@ int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
 
 int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
 
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add);
 #else /* CONFIG_BNX2X_SRIOV */
 
+#define GET_NUM_VFS_PER_PATH(bp)       0
+#define GET_NUM_VFS_PER_PF(bp)         0
+#define VF_MAC_CREDIT_CNT              0
+#define VF_VLAN_CREDIT_CNT             0
+
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
                                struct bnx2x_queue_sp_obj **q_obj) {}
 static inline void bnx2x_vf_handle_flr_event(struct bnx2x *bp) {}
@@ -604,5 +622,7 @@ struct pf_vf_bulletin_content;
 static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
                                              bool support_long) {}
 
+static inline int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add) {return 0; }
+
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index 69d699f0730a3bd4d8980607e0a36cd8da461f1e..7e0919aa450e754d444bc7fb54ec4d2375fabcb8 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_stats.c: Broadcom Everest network driver.
+/* bnx2x_stats.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 965539a9dabe7e4702e1ba6b382aefc69fb26fac..b2644ed13d064eacc3b34cf59d48b156bedac16b 100644 (file)
@@ -1,6 +1,8 @@
-/* bnx2x_stats.h: Broadcom Everest network driver.
+/* bnx2x_stats.h: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 06b8c0d8fd3b12ab4e864c8c0971cc52380c007c..1374e5394a7970ba20ad54ddfc6271ce09e40744 100644 (file)
@@ -1,15 +1,17 @@
-/* bnx2x_vfpf.c: Broadcom Everest network driver.
+/* bnx2x_vfpf.c: QLogic Everest network driver.
  *
  * Copyright 2009-2013 Broadcom Corporation
+ * Copyright 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2, available
  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
  *
  * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
+ * software in any way with any other QLogic software provided under a
+ * license other than the GPL, without QLogic's express prior written
  * consent.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
@@ -245,6 +247,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        req->resc_request.num_sbs = bp->igu_sb_cnt;
        req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
        req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;
+       req->resc_request.num_vlan_filters = VF_ACQUIRE_VLAN_FILTERS;
 
        /* pf 2 vf bulletin board address */
        req->bulletin_addr = bp->pf2vf_bulletin_mapping;
@@ -255,6 +258,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
 
        /* Bulletin support for bulletin board with length > legacy length */
        req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+       /* vlan filtering is supported */
+       req->vfdev_info.caps |= VF_CAP_SUPPORT_VLAN_FILTER;
 
        /* add list termination tlv */
        bnx2x_add_tlv(bp, req,
@@ -373,6 +378,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
                NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
        bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
        bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
+       bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
+
        strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
                sizeof(bp->fw_ver));
 
@@ -546,7 +553,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                           BNX2X_FILTER_MAC_PENDING,
                           &vf->filter_state,
                           BNX2X_OBJ_TYPE_RX_TX,
-                          &bp->macs_pool);
+                          &vf->vf_macs_pool);
        /* vlan */
        bnx2x_init_vlan_obj(bp, &q->vlan_obj,
                            cl_id, q->cid, func_id,
@@ -555,8 +562,17 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
                            BNX2X_FILTER_VLAN_PENDING,
                            &vf->filter_state,
                            BNX2X_OBJ_TYPE_RX_TX,
-                           &bp->vlans_pool);
-
+                           &vf->vf_vlans_pool);
+       /* vlan-mac */
+       bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
+                               cl_id, q->cid, func_id,
+                               bnx2x_vf_sp(bp, vf, vlan_mac_rdata),
+                               bnx2x_vf_sp_map(bp, vf, vlan_mac_rdata),
+                               BNX2X_FILTER_VLAN_MAC_PENDING,
+                               &vf->filter_state,
+                               BNX2X_OBJ_TYPE_RX_TX,
+                               &vf->vf_macs_pool,
+                               &vf->vf_vlans_pool);
        /* mcast */
        bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
                             q->cid, func_id, func_id,
@@ -723,7 +739,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
 
        req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
        if (set)
-               req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;
+               req->filters[0].flags |= VFPF_Q_FILTER_SET;
 
        /* sample bulletin board for new mac */
        bnx2x_sample_bulletin(bp);
@@ -911,6 +927,67 @@ out:
        return 0;
 }
 
+/* request pf to add a vlan for the vf */
+int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
+{
+       struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+       struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+       int rc = 0;
+
+       if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER)) {
+               DP(BNX2X_MSG_IOV, "HV does not support vlan filtering\n");
+               return 0;
+       }
+
+       /* clear mailbox and prep first tlv */
+       bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
+                       sizeof(*req));
+
+       req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
+       req->vf_qid = vf_qid;
+       req->n_mac_vlan_filters = 1;
+
+       req->filters[0].flags = VFPF_Q_FILTER_VLAN_TAG_VALID;
+
+       if (add)
+               req->filters[0].flags |= VFPF_Q_FILTER_SET;
+
+       /* sample bulletin board for hypervisor vlan */
+       bnx2x_sample_bulletin(bp);
+
+       if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+               BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+               rc = -EINVAL;
+               goto out;
+       }
+
+       req->filters[0].vlan_tag = vid;
+
+       /* add list termination tlv */
+       bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       /* output tlvs list */
+       bnx2x_dp_tlv_list(bp, req);
+
+       /* send message to pf */
+       rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+       if (rc) {
+               BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+               goto out;
+       }
+
+       if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+               BNX2X_ERR("vfpf %s VLAN %d failed\n", add ? "add" : "del",
+                         vid);
+               rc = -EINVAL;
+       }
+out:
+       bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+       return rc;
+}
+
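[Editor's note: on the VF side this request would typically be driven from the driver's vlan rx-add/kill paths; a minimal usage sketch with error handling trimmed and queue 0 assumed.]

    /* Sketch: VF asks the PF to start, then stop, filtering VID 100. */
    rc = bnx2x_vfpf_update_vlan(bp, 100, 0, true);
    if (!rc)
            rc = bnx2x_vfpf_update_vlan(bp, 100, 0, false);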
 int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
 {
        int mode = bp->rx_mode;
@@ -934,8 +1011,13 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
                req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
                req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+               if (mode == BNX2X_RX_MODE_PROMISC)
+                       req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
        }
 
+       if (bp->accept_any_vlan)
+               req->rx_mask |= VFPF_RX_MASK_ACCEPT_ANY_VLAN;
+
        req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
        req->vf_qid = 0;
 
@@ -1188,7 +1270,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
                                   PFVF_CAP_TPA |
-                                  PFVF_CAP_TPA_UPDATE);
+                                  PFVF_CAP_TPA_UPDATE |
+                                  PFVF_CAP_VLAN_FILTER);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));
 
@@ -1203,7 +1286,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
                        bnx2x_vf_max_queue_cnt(bp, vf);
                resc->num_sbs = vf_sb_count(vf);
                resc->num_mac_filters = vf_mac_rules_cnt(vf);
-               resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
+               resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
                resc->num_mc_filters = 0;
 
                if (status == PFVF_STATUS_SUCCESS) {
@@ -1370,6 +1453,14 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
        }
 
+       if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_VLAN_FILTER) {
+               DP(BNX2X_MSG_IOV, "VF[%d] supports vlan filtering\n",
+                  vf->abs_vfid);
+               vf->cfg_flags |= VF_CFG_VLAN_FILTER;
+       } else {
+               vf->cfg_flags &= ~VF_CFG_VLAN_FILTER;
+       }
+
 out:
        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
@@ -1382,7 +1473,6 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
        int rc;
 
        /* record ghost addresses from vf message */
-       vf->spq_map = init->spq_addr;
        vf->fw_stat_map = init->stats_addr;
        vf->stats_stride = init->stats_stride;
        rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
@@ -1578,17 +1668,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
 
                if ((msg_filter->flags & type_flag) != type_flag)
                        continue;
-               if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
+               memset(&fl->filters[j], 0, sizeof(fl->filters[j]));
+               if (type_flag & VFPF_Q_FILTER_DEST_MAC_VALID) {
                        fl->filters[j].mac = msg_filter->mac;
-                       fl->filters[j].type = BNX2X_VF_FILTER_MAC;
-               } else {
+                       fl->filters[j].type |= BNX2X_VF_FILTER_MAC;
+               }
+               if (type_flag & VFPF_Q_FILTER_VLAN_TAG_VALID) {
                        fl->filters[j].vid = msg_filter->vlan_tag;
-                       fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
+                       fl->filters[j].type |= BNX2X_VF_FILTER_VLAN;
                }
-               fl->filters[j].add =
-                       (msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
-                       true : false;
+               fl->filters[j].add = !!(msg_filter->flags & VFPF_Q_FILTER_SET);
                fl->count++;
+               j++;
        }
        if (!fl->count)
                kfree(fl);
@@ -1598,6 +1689,18 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
        return 0;
 }
 
+static int bnx2x_vf_filters_contain(struct vfpf_set_q_filters_tlv *filters,
+                                   u32 flags)
+{
+       int i, cnt = 0;
+
+       for (i = 0; i < filters->n_mac_vlan_filters; i++)
+               if ((filters->filters[i].flags & flags) == flags)
+                       cnt++;
+
+       return cnt;
+}
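[Editor's note: despite the boolean-sounding name, the helper returns a count, which is how the vlan validation later in this patch uses it. For example, counting combined VLAN-MAC entries in a request; a sketch, not driver code.]

    /* Sketch: how many entries carry both a MAC and a VLAN tag? */
    int n = bnx2x_vf_filters_contain(filters,
                                     VFPF_Q_FILTER_DEST_MAC_VALID |
                                     VFPF_Q_FILTER_VLAN_TAG_VALID);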
+
 static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
                                       struct vfpf_q_mac_vlan_filter *filter)
 {
@@ -1629,6 +1732,7 @@ static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
 
 #define VFPF_MAC_FILTER                VFPF_Q_FILTER_DEST_MAC_VALID
 #define VFPF_VLAN_FILTER       VFPF_Q_FILTER_VLAN_TAG_VALID
+#define VFPF_VLAN_MAC_FILTER   (VFPF_VLAN_FILTER | VFPF_MAC_FILTER)
 
 static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -1639,17 +1743,17 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
        /* check for any mac/vlan changes */
        if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
-               /* build mac list */
                struct bnx2x_vf_mac_vlan_filters *fl = NULL;
 
+               /* build vlan-mac list */
                rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                              VFPF_MAC_FILTER);
+                                              VFPF_VLAN_MAC_FILTER);
                if (rc)
                        goto op_err;
 
                if (fl) {
 
-                       /* set mac list */
+                       /* set vlan-mac list */
                        rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                                           msg->vf_qid,
                                                           false);
@@ -1657,22 +1761,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                                goto op_err;
                }
 
-               /* build vlan list */
+               /* build mac list */
                fl = NULL;
 
                rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
-                                              VFPF_VLAN_FILTER);
+                                              VFPF_MAC_FILTER);
                if (rc)
                        goto op_err;
 
                if (fl) {
-                       /* set vlan list */
+                       /* set mac list */
                        rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                                           msg->vf_qid,
                                                           false);
                        if (rc)
                                goto op_err;
                }
+
        }
 
        if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
@@ -1687,11 +1792,15 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                        __set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
                }
 
-               /* A packet arriving the vf's mac should be accepted
-                * with any vlan, unless a vlan has already been
-                * configured.
+               /* any_vlan is not configured if HV is forcing VLAN
+                * any_vlan is configured if
+                *   1. VF does not support vlan filtering
+                *   OR
+                *   2. VF supports vlan filtering and explicitly requested it
                 */
-               if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+               if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
+                   (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
+                    msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN))
                        __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
 
                /* set rx-mode */
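[Editor's note: folded into a single predicate, the comment above reads as follows; names as in the hunk, a restatement rather than driver code.]

    bool any_vlan =
            !(bulletin->valid_bitmap & (1 << VLAN_VALID)) &&
            (!(vf->cfg_flags & VF_CFG_VLAN_FILTER) ||
             (msg->rx_mask & VFPF_RX_MASK_ACCEPT_ANY_VLAN));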
@@ -1727,17 +1836,31 @@ static int bnx2x_filters_validate_mac(struct bnx2x *bp,
         * since queue was not set up.
         */
        if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
-               /* once a mac was set by ndo can only accept a single mac... */
-               if (filters->n_mac_vlan_filters > 1) {
-                       BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
-                                 vf->abs_vfid);
-                       rc = -EPERM;
-                       goto response;
+               struct vfpf_q_mac_vlan_filter *filter = NULL;
+               int i;
+
+               for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+                       if (!(filters->filters[i].flags &
+                             VFPF_Q_FILTER_DEST_MAC_VALID))
+                               continue;
+
+                       /* once a mac was set by ndo can only accept
+                        * a single mac...
+                        */
+                       if (filter) {
+                               BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called [%d filters]\n",
+                                         vf->abs_vfid,
+                                         filters->n_mac_vlan_filters);
+                               rc = -EPERM;
+                               goto response;
+                       }
+
+                       filter = &filters->filters[i];
                }
 
                /* ...and only the mac set by the ndo */
-               if (filters->n_mac_vlan_filters == 1 &&
-                   !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
+               if (filter &&
+                   !ether_addr_equal(filter->mac, bulletin->mac)) {
                        BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
                                  vf->abs_vfid);
 
@@ -1759,17 +1882,14 @@ static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
 
        /* if vlan was set by hypervisor we don't allow guest to config vlan */
        if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
-               int i;
-
                /* search for vlan filters */
-               for (i = 0; i < filters->n_mac_vlan_filters; i++) {
-                       if (filters->filters[i].flags &
-                           VFPF_Q_FILTER_VLAN_TAG_VALID) {
-                               BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
-                                         vf->abs_vfid);
-                               rc = -EPERM;
-                               goto response;
-                       }
+
+               if (bnx2x_vf_filters_contain(filters,
+                                            VFPF_Q_FILTER_VLAN_TAG_VALID)) {
+                       BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+                                 vf->abs_vfid);
+                       rc = -EPERM;
+                       goto response;
                }
        }
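
The rx-mode change in the first hunk above reduces to a small predicate: ACCEPT_ANY_VLAN is granted only when the hypervisor has not forced a VLAN on the VF, and either the VF does not support VLAN filtering at all or it explicitly requested any-VLAN in its rx mask. A minimal stand-alone sketch of that decision; the flag constants and helper below are illustrative stand-ins, not the driver's own definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define VLAN_VALID_BIT          (1 << 0)  /* stand-in for the bulletin's VLAN_VALID */
    #define VF_CFG_VLAN_FILTER_BIT  (1 << 0)  /* stand-in for VF_CFG_VLAN_FILTER */
    #define RX_MASK_ANY_VLAN_BIT    (1 << 5)  /* stand-in for VFPF_RX_MASK_ACCEPT_ANY_VLAN */

    /* Returns true when the PF should set BNX2X_ACCEPT_ANY_VLAN for this VF. */
    static bool accept_any_vlan(unsigned int bulletin_valid_bitmap,
                                unsigned int vf_cfg_flags,
                                unsigned int requested_rx_mask)
    {
            bool hv_forced_vlan  = bulletin_valid_bitmap & VLAN_VALID_BIT;
            bool vf_filters_vlan = vf_cfg_flags & VF_CFG_VLAN_FILTER_BIT;
            bool vf_asked_any    = requested_rx_mask & RX_MASK_ANY_VLAN_BIT;

            return !hv_forced_vlan && (!vf_filters_vlan || vf_asked_any);
    }

    int main(void)
    {
            /* Legacy VF (no vlan filtering), no forced vlan: any-vlan granted. */
            printf("%d\n", accept_any_vlan(0, 0, 0));                                 /* 1 */
            /* Filtering-capable VF that did not request any-vlan: not granted. */
            printf("%d\n", accept_any_vlan(0, VF_CFG_VLAN_FILTER_BIT, 0));            /* 0 */
            /* Hypervisor forced a vlan: never granted. */
            printf("%d\n", accept_any_vlan(VLAN_VALID_BIT, 0, RX_MASK_ANY_VLAN_BIT)); /* 0 */
            return 0;
    }

Built this way, legacy VFs keep their old behaviour unchanged, while filtering-capable VFs only receive tagged traffic they explicitly asked for unless they opt back into any-VLAN.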
 
index b86479fc0d2f80adc9a066da4e26ee9b50d80bc9..64f2b52c58293964ad55a77caa47990312d48df6 100644 (file)
@@ -1,16 +1,22 @@
-/* bnx2x_vfpf.h: Broadcom Everest network driver.
+/* bnx2x_vfpf.h: QLogic Everest network driver.
  *
  * Copyright (c) 2011-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
- * Unless you and Broadcom execute a separate written software license
+ * Unless you and QLogic execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
- * under the terms of the GNU General Public License version 2, available
- * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ * under the terms of the GNU General Public License version 2 (the “GPL”),
+ * available at http://www.gnu.org/licenses/gpl-2.0.html, with the following
+ * added to such license:
  *
- * Notwithstanding the above, under no circumstances may you combine this
- * software in any way with any other Broadcom software provided under a
- * license other than the GPL, without Broadcom's express prior written
- * consent.
+ * As a special exception, the copyright holders of this software give you
+ * permission to link this software with independent modules, and to copy and
+ * distribute the resulting executable under terms of your choice, provided that
+ * you also meet, for each linked independent module, the terms and conditions
+ * of the license of that module.  An independent module is a module which is
+ * not derived from this software.  The special exception does not apply to any
+ * modifications of the software.
  *
  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
  * Written by: Ariel Elior <ariel.elior@qlogic.com>
@@ -64,6 +70,8 @@ struct hw_sb_info {
 #define VFPF_RX_MASK_ACCEPT_ALL_UNICAST                0x00000004
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST      0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST          0x00000010
+#define VFPF_RX_MASK_ACCEPT_ANY_VLAN           0x00000020
+
 #define BULLETIN_CONTENT_SIZE          (sizeof(struct pf_vf_bulletin_content))
 #define BULLETIN_CONTENT_LEGACY_SIZE   (32)
 #define BULLETIN_ATTEMPTS      5 /* crc failures before throwing towel */
@@ -127,6 +135,7 @@ struct vfpf_acquire_tlv {
                u8 fp_hsi_ver;
                u8 caps;
 #define VF_CAP_SUPPORT_EXT_BULLETIN    (1 << 0)
+#define VF_CAP_SUPPORT_VLAN_FILTER     (1 << 1)
        } vfdev_info;
 
        struct vf_pf_resc_request resc_request;
@@ -168,10 +177,12 @@ struct pfvf_acquire_resp_tlv {
        struct pf_vf_pfdev_info {
                u32 chip_num;
                u32 pf_cap;
-#define PFVF_CAP_RSS           0x00000001
-#define PFVF_CAP_DHC           0x00000002
-#define PFVF_CAP_TPA           0x00000004
-#define PFVF_CAP_TPA_UPDATE    0x00000008
+#define PFVF_CAP_RSS          0x00000001
+#define PFVF_CAP_DHC          0x00000002
+#define PFVF_CAP_TPA          0x00000004
+#define PFVF_CAP_TPA_UPDATE   0x00000008
+#define PFVF_CAP_VLAN_FILTER  0x00000010
+
                char fw_ver[32];
                u16 db_size;
                u8  indices_per_sb;
@@ -288,7 +299,7 @@ struct vfpf_q_mac_vlan_filter {
        u32 flags;
 #define VFPF_Q_FILTER_DEST_MAC_VALID   0x01
 #define VFPF_Q_FILTER_VLAN_TAG_VALID   0x02
-#define VFPF_Q_FILTER_SET_MAC          0x100   /* set/clear */
+#define VFPF_Q_FILTER_SET              0x100   /* set/clear */
        u8  mac[ETH_ALEN];
        u16 vlan_tag;
 };
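
The header changes above add one capability bit on each side of the channel. VLAN filtering only becomes usable when the VF advertises VF_CAP_SUPPORT_VLAN_FILTER in its acquire request and the PF answers with PFVF_CAP_VLAN_FILTER in its response; if either bit is missing, both ends fall back to the legacy behaviour. A hedged sketch of that handshake, with the two helper functions being illustrative rather than driver code:

    #include <stdbool.h>
    #include <stdio.h>

    #define VF_CAP_SUPPORT_VLAN_FILTER  (1 << 1)    /* from vfpf_acquire_tlv above */
    #define PFVF_CAP_VLAN_FILTER        0x00000010  /* from pfvf_acquire_resp_tlv above */

    /* VF side: advertise the capability only if this VF build supports it. */
    static unsigned char vf_build_caps(bool vf_supports_vlan_filter)
    {
            unsigned char caps = 0;

            if (vf_supports_vlan_filter)
                    caps |= VF_CAP_SUPPORT_VLAN_FILTER;
            return caps;
    }

    /* VF side: the feature is usable only when both ends agreed. */
    static bool vlan_filter_negotiated(unsigned char vf_caps, unsigned int pf_cap)
    {
            return (vf_caps & VF_CAP_SUPPORT_VLAN_FILTER) &&
                   (pf_cap & PFVF_CAP_VLAN_FILTER);
    }

    int main(void)
    {
            unsigned char caps = vf_build_caps(true);

            /* Old PF without the bit: VF must keep the legacy behaviour. */
            printf("old PF: %d\n", vlan_filter_negotiated(caps, 0));
            /* New PF advertising the capability: VLAN filters may be configured. */
            printf("new PF: %d\n", vlan_filter_negotiated(caps, PFVF_CAP_VLAN_FILTER));
            return 0;
    }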
index 64c1e9db6b0b5420687c1c083cc20b8adb7de5bd..eb080ef8ee97fec11fe3c03eee9f9ce8cac8eeb7 100644 (file)
@@ -907,9 +907,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
        }
 
        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-
        if (mode == GENET_POWER_PASSIVE)
-               bcmgenet_mii_reset(priv->dev);
+               bcmgenet_phy_power_set(priv->dev, true);
 }
 
 /* ioctl handle special commands that are not present in ethtool. */
@@ -1725,7 +1724,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        int0_enable |= UMAC_IRQ_TXDMA_DONE;
 
        /* Monitor cable plug/unplugged event for internal PHY */
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->ext_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
@@ -2389,6 +2388,23 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void bcmgenet_poll_controller(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Invoke the main RX/TX interrupt handler */
+       disable_irq(priv->irq0);
+       bcmgenet_isr0(priv->irq0, priv);
+       enable_irq(priv->irq0);
+
+       /* And the interrupt handler for RX/TX priority queues */
+       disable_irq(priv->irq1);
+       bcmgenet_isr1(priv->irq1, priv);
+       enable_irq(priv->irq1);
+}
+#endif
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
        u32 reg;
@@ -2626,13 +2642,12 @@ static int bcmgenet_open(struct net_device *dev)
        netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
 
        /* Turn on the clock */
-       if (!IS_ERR(priv->clk))
-               clk_prepare_enable(priv->clk);
+       clk_prepare_enable(priv->clk);
 
        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
        /* take MAC out of reset */
@@ -2651,7 +2666,7 @@ static int bcmgenet_open(struct net_device *dev)
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
@@ -2687,23 +2702,24 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
-       /* Re-configure the port multiplexer towards the PHY device */
-       bcmgenet_mii_config(priv->dev, false);
-
-       phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
-                          priv->phy_interface);
+       ret = bcmgenet_mii_probe(dev);
+       if (ret) {
+               netdev_err(dev, "failed to connect to PHY\n");
+               goto err_irq1;
+       }
 
        bcmgenet_netif_start(dev);
 
        return 0;
 
+err_irq1:
+       free_irq(priv->irq1, priv);
 err_irq0:
-       free_irq(priv->irq0, dev);
+       free_irq(priv->irq0, priv);
 err_fini_dma:
        bcmgenet_fini_dma(priv);
 err_clk_disable:
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
        return ret;
 }
 
@@ -2757,11 +2773,10 @@ static int bcmgenet_close(struct net_device *dev)
        free_irq(priv->irq0, priv);
        free_irq(priv->irq1, priv);
 
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        return ret;
 }
@@ -2941,6 +2956,9 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_set_mac_address    = bcmgenet_set_mac_addr,
        .ndo_do_ioctl           = bcmgenet_ioctl,
        .ndo_set_features       = bcmgenet_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = bcmgenet_poll_controller,
+#endif
 };
 
 /* Array of GENET hardware parameters/characteristics */
@@ -3214,11 +3232,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
                priv->version = pd->genet_version;
 
        priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
-       if (IS_ERR(priv->clk))
+       if (IS_ERR(priv->clk)) {
                dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+               priv->clk = NULL;
+       }
 
-       if (!IS_ERR(priv->clk))
-               clk_prepare_enable(priv->clk);
+       clk_prepare_enable(priv->clk);
 
        bcmgenet_set_hw_params(priv);
 
@@ -3229,8 +3248,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
        INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
 
        priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
-       if (IS_ERR(priv->clk_wol))
+       if (IS_ERR(priv->clk_wol)) {
                dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+               priv->clk_wol = NULL;
+       }
 
        priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
        if (IS_ERR(priv->clk_eee)) {
@@ -3256,8 +3277,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        netif_carrier_off(dev);
 
        /* Turn off the main clock, WOL clock is handled separately */
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 
        err = register_netdev(dev);
        if (err)
@@ -3266,8 +3286,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        return err;
 
 err_clk_disable:
-       if (!IS_ERR(priv->clk))
-               clk_disable_unprepare(priv->clk);
+       clk_disable_unprepare(priv->clk);
 err:
        free_netdev(dev);
        return err;
@@ -3319,7 +3338,7 @@ static int bcmgenet_suspend(struct device *d)
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
                clk_prepare_enable(priv->clk_wol);
-       } else if (phy_is_internal(priv->phydev)) {
+       } else if (priv->internal_phy) {
                ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
        }
 
@@ -3348,7 +3367,7 @@ static int bcmgenet_resume(struct device *d)
        /* If this is an internal GPHY, power it back on now, before UniMAC is
         * brought out of reset as absolutely no UniMAC activity is allowed
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
        bcmgenet_umac_reset(priv);
@@ -3363,14 +3382,14 @@ static int bcmgenet_resume(struct device *d)
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
-       bcmgenet_mii_config(priv->dev, false);
+       bcmgenet_mii_config(priv->dev);
 
        /* disable ethernet MAC while updating its registers */
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (phy_is_internal(priv->phydev)) {
+       if (priv->internal_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
                bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
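
Several of the IS_ERR(priv->clk) guards above disappear because the common clock API treats a NULL clock as a valid no-op: by downgrading a failed devm_clk_get() to priv->clk = NULL at probe time, every later clk_prepare_enable()/clk_disable_unprepare() call becomes unconditionally safe. A small user-space sketch of that convention, using fake_clk_* stand-ins rather than the real clk framework:

    #include <stdio.h>
    #include <stddef.h>

    struct fake_clk { const char *name; };

    /* Mimic the kernel convention: a NULL clock is valid and all ops succeed. */
    static int fake_clk_prepare_enable(struct fake_clk *clk)
    {
            if (!clk)
                    return 0;       /* nothing to do, not an error */
            printf("enabled %s\n", clk->name);
            return 0;
    }

    static void fake_clk_disable_unprepare(struct fake_clk *clk)
    {
            if (!clk)
                    return;
            printf("disabled %s\n", clk->name);
    }

    int main(void)
    {
            struct fake_clk enet = { "enet" };
            struct fake_clk *missing = NULL;  /* clock lookup failed, downgraded to NULL */

            fake_clk_prepare_enable(&enet);
            fake_clk_disable_unprepare(&enet);

            /* Callers no longer need IS_ERR()-style guards around these. */
            fake_clk_prepare_enable(missing);
            fake_clk_disable_unprepare(missing);
            return 0;
    }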
index 6159deab8c9850a0231ef3b3f1fad6dfaa31a588..7299d10754226680e71ace26cbe4f996c8867127 100644 (file)
@@ -593,6 +593,7 @@ struct bcmgenet_priv {
        /* MDIO bus variables */
        wait_queue_head_t wq;
        struct phy_device *phydev;
+       bool internal_phy;
        struct device_node *phy_dn;
        struct device_node *mdio_dn;
        struct mii_bus *mii_bus;
@@ -670,9 +671,9 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
-int bcmgenet_mii_config(struct net_device *dev, bool init);
+int bcmgenet_mii_config(struct net_device *dev);
+int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
-void bcmgenet_mii_reset(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
 
index adf23d2ac4888e89f63c4246e7c3b33eaf3d0fd0..b3679ad1c1c73a62bc2e4d30e39e7505cc39c48e 100644 (file)
@@ -163,14 +163,13 @@ void bcmgenet_mii_setup(struct net_device *dev)
        phy_print_status(phydev);
 }
 
-void bcmgenet_mii_reset(struct net_device *dev)
+static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
+                                         struct fixed_phy_status *status)
 {
-       struct bcmgenet_priv *priv = netdev_priv(dev);
+       if (dev && dev->phydev && status)
+               status->link = dev->phydev->link;
 
-       if (priv->phydev) {
-               phy_init_hw(priv->phydev);
-               phy_start_aneg(priv->phydev);
-       }
+       return 0;
 }
 
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
@@ -215,7 +214,6 @@ static void bcmgenet_internal_phy_setup(struct net_device *dev)
        reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
        reg |= EXT_PWR_DN_EN_LD;
        bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
-       bcmgenet_mii_reset(dev);
 }
 
 static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
@@ -226,9 +224,13 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
        reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
        reg |= LED_ACT_SOURCE_MAC;
        bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+
+       if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+               fixed_phy_set_link_update(priv->phydev,
+                                         bcmgenet_fixed_phy_link_update);
 }
 
-int bcmgenet_mii_config(struct net_device *dev, bool init)
+int bcmgenet_mii_config(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct phy_device *phydev = priv->phydev;
@@ -238,10 +240,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
        u32 port_ctrl;
        u32 reg;
 
-       priv->ext_phy = !phy_is_internal(priv->phydev) &&
+       priv->ext_phy = !priv->internal_phy &&
                        (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                priv->phy_interface = PHY_INTERFACE_MODE_NA;
 
        switch (priv->phy_interface) {
@@ -259,7 +261,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 
                bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
 
-               if (phy_is_internal(priv->phydev)) {
+               if (priv->internal_phy) {
                        phy_name = "internal PHY";
                        bcmgenet_internal_phy_setup(dev);
                } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -321,13 +323,12 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       if (init)
-               dev_info(kdev, "configuring instance for %s\n", phy_name);
+       dev_info_once(kdev, "configuring instance for %s\n", phy_name);
 
        return 0;
 }
 
-static int bcmgenet_mii_probe(struct net_device *dev)
+int bcmgenet_mii_probe(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device_node *dn = priv->pdev->dev.of_node;
@@ -345,22 +346,6 @@ static int bcmgenet_mii_probe(struct net_device *dev)
        priv->old_pause = -1;
 
        if (dn) {
-               if (priv->phydev) {
-                       pr_info("PHY already attached\n");
-                       return 0;
-               }
-
-               /* In the case of a fixed PHY, the DT node associated
-                * to the PHY is the Ethernet MAC DT node.
-                */
-               if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
-                       ret = of_phy_register_fixed_link(dn);
-                       if (ret)
-                               return ret;
-
-                       priv->phy_dn = of_node_get(dn);
-               }
-
                phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
                                        phy_flags, priv->phy_interface);
                if (!phydev) {
@@ -386,7 +371,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
         * PHY speed which is needed for bcmgenet_mii_config() to configure
         * things appropriately.
         */
-       ret = bcmgenet_mii_config(dev, true);
+       ret = bcmgenet_mii_config(dev);
        if (ret) {
                phy_disconnect(priv->phydev);
                return ret;
@@ -397,14 +382,11 @@ static int bcmgenet_mii_probe(struct net_device *dev)
        /* The internal PHY has its link interrupts routed to the
         * Ethernet MAC ISRs
         */
-       if (phy_is_internal(priv->phydev))
+       if (priv->internal_phy)
                priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
        else
                priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
-       pr_info("attached PHY at address %d [%s]\n",
-               phydev->addr, phydev->drv->name);
-
        return 0;
 }
 
@@ -490,7 +472,10 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
 {
        struct device_node *dn = priv->pdev->dev.of_node;
        struct device *kdev = &priv->pdev->dev;
+       const char *phy_mode_str = NULL;
+       struct phy_device *phydev = NULL;
        char *compat;
+       int phy_mode;
        int ret;
 
        compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
@@ -513,17 +498,43 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
        /* Fetch the PHY phandle */
        priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
 
+       /* In the case of a fixed PHY, the DT node associated
+        * to the PHY is the Ethernet MAC DT node.
+        */
+       if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
+               ret = of_phy_register_fixed_link(dn);
+               if (ret)
+                       return ret;
+
+               priv->phy_dn = of_node_get(dn);
+       }
+
        /* Get the link mode */
-       priv->phy_interface = of_get_phy_mode(dn);
+       phy_mode = of_get_phy_mode(dn);
+       priv->phy_interface = phy_mode;
 
-       return 0;
-}
+       /* We need to specifically look up whether this PHY interface is internal
+        * or not *before* we even try to probe the PHY driver over MDIO as we
+        * may have shut down the internal PHY for power saving purposes.
+        */
+       if (phy_mode < 0) {
+               ret = of_property_read_string(dn, "phy-mode", &phy_mode_str);
+               if (ret < 0) {
+                       dev_err(kdev, "invalid PHY mode property\n");
+                       return ret;
+               }
 
-static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
-                                         struct fixed_phy_status *status)
-{
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+               priv->phy_interface = PHY_INTERFACE_MODE_NA;
+               if (!strcasecmp(phy_mode_str, "internal"))
+                       priv->internal_phy = true;
+       }
+
+       /* Make sure we initialize MoCA PHYs with a link down */
+       if (phy_mode == PHY_INTERFACE_MODE_MOCA) {
+               phydev = of_phy_find_device(dn);
+               if (phydev)
+                       phydev->link = 0;
+       }
 
        return 0;
 }
@@ -580,12 +591,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
                        return -ENODEV;
                }
 
-               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) {
-                       ret = fixed_phy_set_link_update(
-                               phydev, bcmgenet_fixed_phy_link_update);
-                       if (!ret)
-                               phydev->link = 0;
-               }
+               /* Make sure we initialize MoCA PHYs with a link down */
+               phydev->link = 0;
+
        }
 
        priv->phydev = phydev;
@@ -614,10 +622,6 @@ int bcmgenet_mii_init(struct net_device *dev)
                return ret;
 
        ret = bcmgenet_mii_bus_init(priv);
-       if (ret)
-               goto out_free;
-
-       ret = bcmgenet_mii_probe(dev);
        if (ret)
                goto out;
 
@@ -626,7 +630,6 @@ int bcmgenet_mii_init(struct net_device *dev)
 out:
        of_node_put(priv->phy_dn);
        mdiobus_unregister(priv->mii_bus);
-out_free:
        kfree(priv->mii_bus->irq);
        mdiobus_free(priv->mii_bus);
        return ret;
index bf9eb2ecf96003e066418d93c7d22aae89c76a31..88c1e1a834f8c44491c76269be7c6aa805906ab2 100644 (file)
@@ -2774,8 +2774,7 @@ static const struct macb_config emac_config = {
 
 
 static const struct macb_config zynqmp_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-               MACB_CAPS_JUMBO,
+       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -2783,8 +2782,7 @@ static const struct macb_config zynqmp_config = {
 };
 
 static const struct macb_config zynq_config = {
-       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
-               MACB_CAPS_NO_GIGABIT_HALF,
+       .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
index 1895b6b2adddc00188b515004ca5359b860fdc0f..6e1faea00ca829f5c9df34b45518d7e1a881c5c3 100644 (file)
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
 #define MACB_CAPS_MACB_IS_GEM                  0x80000000
-#define MACB_CAPS_JUMBO                                0x00000008
+#define MACB_CAPS_JUMBO                                0x00000010
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
index c4d6bbe9458dbfe9c726fdff1e9b6c4c64b8a33f..3584420878782aa72800072e28997d40280dccec 100644 (file)
@@ -37,6 +37,8 @@ config        THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
        depends on 64BIT
        default ARCH_THUNDER
+       select PHYLIB
+       select MDIO_OCTEON
        ---help---
          This driver supports programming and controlling of MAC
          interface from NIC physical function driver.
index 629f75d703535d3ef5d8319a9ed2f3ec90a51ba9..58de4443eac0318234fd6b6ef46384243f14d7da 100644 (file)
@@ -767,6 +767,7 @@ struct adapter {
        bool tid_release_task_busy;
 
        struct dentry *debugfs_root;
+       u32 use_bd;     /* Use SGE Back Door intfc for reading SGE Contexts */
 
        spinlock_t stats_lock;
        spinlock_t win0_lock ____cacheline_aligned_in_smp;
index 6074680bc9858308fa68887140856c8bb05b4d6a..052c660aca80a8dd06593112c9961a6bdda07309 100644 (file)
@@ -31,6 +31,15 @@ static const char * const dcb_ver_array[] = {
        "Auto Negotiated"
 };
 
+static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
+{
+       if (state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
+           state == CXGB4_DCB_STATE_HOST)
+               return true;
+       else
+               return false;
+}
+
 /* Initialize a port's Data Center Bridging state.  Typically used after a
  * Link Down event.
  */
@@ -603,7 +612,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
-       if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+       if (!cxgb4_dcb_state_synced(dcb->state) ||
            priority >= CXGB4_MAX_PRIORITY)
                *pfccfg = 0;
        else
@@ -620,7 +629,7 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
        struct adapter *adap = pi->adapter;
        int err;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+       if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
            priority >= CXGB4_MAX_PRIORITY)
                return;
 
@@ -732,7 +741,7 @@ static u8 cxgb4_getpfcstate(struct net_device *dev)
 {
        struct port_info *pi = netdev2pinfo(dev);
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return false;
 
        return pi->dcb.pfcen != 0;
@@ -756,7 +765,7 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
        struct adapter *adap = pi->adapter;
        int i;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 0;
 
        for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -794,7 +803,9 @@ static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
  */
 static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
 {
-       return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+       /* Convert app_idtype to firmware format before querying */
+       return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
+                             app_idtype : 3, app_id, 0);
 }
 
 /* Write a new Application User Priority Map for the specified Application ID
@@ -808,7 +819,7 @@ static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
        int i, err;
 
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return -EINVAL;
 
        /* DCB info gets thrown away on link up */
@@ -896,10 +907,11 @@ cxgb4_ieee_negotiation_complete(struct net_device *dev,
        struct port_info *pi = netdev2pinfo(dev);
        struct port_dcb_info *dcb = &pi->dcb;
 
-       if (dcb_subtype && !(dcb->msgs & dcb_subtype))
-               return 0;
+       if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
+               if (dcb_subtype && !(dcb->msgs & dcb_subtype))
+                       return 0;
 
-       return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+       return (cxgb4_dcb_state_synced(dcb->state) &&
                (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
 }
 
@@ -1057,7 +1069,7 @@ static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
 
        /* Can't enable DCB if we haven't successfully negotiated it.
         */
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        /* There's currently no mechanism to allow for the firmware DCBX
@@ -1080,7 +1092,7 @@ static int cxgb4_getpeer_app(struct net_device *dev,
        struct adapter *adap = pi->adapter;
        int i, err = 0;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        info->willing = 0;
@@ -1114,7 +1126,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
        struct adapter *adap = pi->adapter;
        int i, err = 0;
 
-       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+       if (!cxgb4_dcb_state_synced(pi->dcb.state))
                return 1;
 
        for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
@@ -1133,7 +1145,7 @@ static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
                if (!pcmd.u.dcb.app_priority.protocolid)
                        break;
 
-               table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+               table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
                table[i].protocol =
                        be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
                table[i].priority =
@@ -1181,6 +1193,8 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
        for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
                pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
 
+       pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;
+
        return 0;
 }
 
@@ -1198,6 +1212,8 @@ static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
         */
        pfc->pfc_en = bitswap_1(pi->dcb.pfcen);
 
+       pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;
+
        return 0;
 }
 
index a11485fbb33f2b7bcd6c973324ea41601dbaf575..f701a6f20c6af6f67a8ab40d81d63b5fb0c921a4 100644 (file)
@@ -151,6 +151,45 @@ static int cim_la_show_3in1(struct seq_file *seq, void *v, int idx)
        return 0;
 }
 
+static int cim_la_show_t6(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Inst    Data      PC     LS0Stat  "
+                        "LS0Addr  LS0Data  LS1Stat  LS1Addr  LS1Data\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x %08x %08x %08x %08x %08x %08x\n",
+                          (p[9] >> 16) & 0xff,       /* Status */
+                          p[9] & 0xffff, p[8] >> 16, /* Inst */
+                          p[8] & 0xffff, p[7] >> 16, /* Data */
+                          p[7] & 0xffff, p[6] >> 16, /* PC */
+                          p[2], p[1], p[0],      /* LS0 Stat, Addr and Data */
+                          p[5], p[4], p[3]);     /* LS1 Stat, Addr and Data */
+       }
+       return 0;
+}
+
+static int cim_la_show_pc_t6(struct seq_file *seq, void *v, int idx)
+{
+       if (v == SEQ_START_TOKEN) {
+               seq_puts(seq, "Status   Inst    Data      PC\n");
+       } else {
+               const u32 *p = v;
+
+               seq_printf(seq, "  %02x   %08x %08x %08x\n",
+                          p[3] & 0xff, p[2], p[1], p[0]);
+               seq_printf(seq, "  %02x   %02x%06x %02x%06x %02x%06x\n",
+                          (p[6] >> 8) & 0xff, p[6] & 0xff, p[5] >> 8,
+                          p[5] & 0xff, p[4] >> 8, p[4] & 0xff, p[3] >> 8);
+               seq_printf(seq, "  %02x   %04x%04x %04x%04x %04x%04x\n",
+                          (p[9] >> 16) & 0xff, p[9] & 0xffff, p[8] >> 16,
+                          p[8] & 0xffff, p[7] >> 16, p[7] & 0xffff,
+                          p[6] >> 16);
+       }
+       return 0;
+}
+
 static int cim_la_open(struct inode *inode, struct file *file)
 {
        int ret;
@@ -162,9 +201,18 @@ static int cim_la_open(struct inode *inode, struct file *file)
        if (ret)
                return ret;
 
-       p = seq_open_tab(file, adap->params.cim_la_size / 8, 8 * sizeof(u32), 1,
-                        cfg & UPDBGLACAPTPCONLY_F ?
-                        cim_la_show_3in1 : cim_la_show);
+       if (is_t6(adap->params.chip)) {
+               /* +1 to account for integer division of CIMLA_SIZE/10 */
+               p = seq_open_tab(file, (adap->params.cim_la_size / 10) + 1,
+                                10 * sizeof(u32), 1,
+                                cfg & UPDBGLACAPTPCONLY_F ?
+                                       cim_la_show_pc_t6 : cim_la_show_t6);
+       } else {
+               p = seq_open_tab(file, adap->params.cim_la_size / 8,
+                                8 * sizeof(u32), 1,
+                                cfg & UPDBGLACAPTPCONLY_F ? cim_la_show_3in1 :
+                                                            cim_la_show);
+       }
        if (!p)
                return -ENOMEM;
 
@@ -2340,6 +2388,8 @@ int t4_setup_debugfs(struct adapter *adap)
 
        de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
                                      &flash_debugfs_fops, adap->params.sf_size);
+       debugfs_create_bool("use_backdoor", S_IWUSR | S_IRUSR,
+                           adap->debugfs_root, &adap->use_bd);
 
        return 0;
 }
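
The "+1" in the T6 cim_la branch earlier in this file guards against truncating integer division: the T6 logic analyzer lays its capture out as 10 u32s per row, and cim_la_size / 10 would drop a final partial row, so one extra row is always allocated (over-allocating by a single row when the size happens to be a multiple of 10). A worked check of that arithmetic against the exact ceiling form it approximates:

    #include <stdio.h>

    /* Exact ceiling division, the quantity size/10 + 1 is meant to cover. */
    static unsigned int ceil_div(unsigned int n, unsigned int d)
    {
            return (n + d - 1) / d;
    }

    int main(void)
    {
            unsigned int sizes[] = { 2048, 2050, 2055 };
            unsigned int i;

            for (i = 0; i < 3; i++) {
                    unsigned int n = sizes[i];

                    /* size/10 + 1 always covers ceil(size/10); it over-allocates
                     * by one row only when size is an exact multiple of 10.
                     */
                    printf("size=%u  floor+1=%u  ceil=%u\n",
                           n, n / 10 + 1, ceil_div(n, 10));
            }
            return 0;
    }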
index 687acf71fa15e01e5886b5055f9bc9b8ccbc4929..5eedb98ff581a8c67dd8284a1cc2ad9fd3fbdd6e 100644 (file)
@@ -925,6 +925,20 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);
        unsigned int mbox = PCIE_FW_MASTER_M + 1;
+       u32 pcie_fw;
+       unsigned int master;
+       u8 master_vld = 0;
+
+       pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+       master = PCIE_FW_MASTER_G(pcie_fw);
+       if (pcie_fw & PCIE_FW_MASTER_VLD_F)
+               master_vld = 1;
+       /* if csiostor is the master return */
+       if (master_vld && (master != adap->pf)) {
+               dev_warn(adap->pdev_dev,
+                        "cxgb4 driver needs to be loaded as MASTER to support FW flash\n");
+               return -EOPNOTSUPP;
+       }
 
        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
index 351f3b1bf80025167c9afcc226252ec923a639b1..d582e175dfb61827be5304636df03f7859cd5dbb 100644 (file)
@@ -4757,7 +4757,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        cfg_queues(adapter);
 
-       adapter->l2t = t4_init_l2t();
+       adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
        if (!adapter->l2t) {
                /* We tolerate a lack of L2T, giving up some functionality */
                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
index 252efc29321f4e7c57a53e631b520ea8820f6900..ac27898c6ab0b249ad6ced9308b55cd40f883ece 100644 (file)
 #define VLAN_NONE 0xfff
 
 /* identifies sync vs async L2T_WRITE_REQs */
-#define F_SYNC_WR    (1 << 12)
-
-enum {
-       L2T_STATE_VALID,      /* entry is up to date */
-       L2T_STATE_STALE,      /* entry may be used but needs revalidation */
-       L2T_STATE_RESOLVING,  /* entry needs address resolution */
-       L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
-
-       /* when state is one of the below the entry is not hashed */
-       L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
-       L2T_STATE_UNUSED      /* entry not in use */
-};
+#define SYNC_WR_S    12
+#define SYNC_WR_V(x) ((x) << SYNC_WR_S)
+#define SYNC_WR_F    SYNC_WR_V(1)
 
 struct l2t_data {
+       unsigned int l2t_start;     /* start index of our piece of the L2T */
+       unsigned int l2t_size;      /* number of entries in l2tab */
        rwlock_t lock;
        atomic_t nfree;             /* number of free entries */
        struct l2t_entry *rover;    /* starting point for next allocation */
-       struct l2t_entry l2tab[L2T_SIZE];
+       struct l2t_entry l2tab[0];  /* MUST BE LAST */
 };
 
 static inline unsigned int vlan_prio(const struct l2t_entry *e)
@@ -85,29 +78,36 @@ static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
 /*
  * To avoid having to check address families we do not allow v4 and v6
  * neighbors to be on the same hash chain.  We keep v4 entries in the first
- * half of available hash buckets and v6 in the second.
+ * half of available hash buckets and v6 in the second.  We need at least two
+ * entries in our L2T for this scheme to work.
  */
 enum {
-       L2T_SZ_HALF = L2T_SIZE / 2,
-       L2T_HASH_MASK = L2T_SZ_HALF - 1
+       L2T_MIN_HASH_BUCKETS = 2,
 };
 
-static inline unsigned int arp_hash(const u32 *key, int ifindex)
+static inline unsigned int arp_hash(struct l2t_data *d, const u32 *key,
+                                   int ifindex)
 {
-       return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+       unsigned int l2t_size_half = d->l2t_size / 2;
+
+       return jhash_2words(*key, ifindex, 0) % l2t_size_half;
 }
 
-static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+static inline unsigned int ipv6_hash(struct l2t_data *d, const u32 *key,
+                                    int ifindex)
 {
+       unsigned int l2t_size_half = d->l2t_size / 2;
        u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
 
-       return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+       return (l2t_size_half +
+               (jhash_2words(xor, ifindex, 0) % l2t_size_half));
 }
 
-static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+static unsigned int addr_hash(struct l2t_data *d, const u32 *addr,
+                             int addr_len, int ifindex)
 {
-       return addr_len == 4 ? arp_hash(addr, ifindex) :
-                              ipv6_hash(addr, ifindex);
+       return addr_len == 4 ? arp_hash(d, addr, ifindex) :
+                              ipv6_hash(d, addr, ifindex);
 }
 
 /*
@@ -139,6 +139,8 @@ static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
  */
 static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 {
+       struct l2t_data *d = adap->l2t;
+       unsigned int l2t_idx = e->idx + d->l2t_start;
        struct sk_buff *skb;
        struct cpl_l2t_write_req *req;
 
@@ -150,10 +152,10 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
        INIT_TP_WR(req, 0);
 
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
-                                       e->idx | (sync ? F_SYNC_WR : 0) |
+                                       l2t_idx | (sync ? SYNC_WR_F : 0) |
                                        TID_QID_V(adap->sge.fw_evtq.abs_id)));
        req->params = htons(L2T_W_PORT_V(e->lport) | L2T_W_NOREPLY_V(!sync));
-       req->l2t_idx = htons(e->idx);
+       req->l2t_idx = htons(l2t_idx);
        req->vlan = htons(e->vlan);
        if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
                memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
@@ -190,18 +192,19 @@ static void send_pending(struct adapter *adap, struct l2t_entry *e)
  */
 void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
 {
+       struct l2t_data *d = adap->l2t;
        unsigned int tid = GET_TID(rpl);
-       unsigned int idx = tid & (L2T_SIZE - 1);
+       unsigned int l2t_idx = tid % L2T_SIZE;
 
        if (unlikely(rpl->status != CPL_ERR_NONE)) {
                dev_err(adap->pdev_dev,
                        "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
-                       rpl->status, idx);
+                       rpl->status, l2t_idx);
                return;
        }
 
-       if (tid & F_SYNC_WR) {
-               struct l2t_entry *e = &adap->l2t->l2tab[idx];
+       if (tid & SYNC_WR_F) {
+               struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
 
                spin_lock(&e->lock);
                if (e->state != L2T_STATE_SWITCHING) {
@@ -276,7 +279,7 @@ static struct l2t_entry *alloc_l2e(struct l2t_data *d)
                return NULL;
 
        /* there's definitely a free entry */
-       for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+       for (e = d->rover, end = &d->l2tab[d->l2t_size]; e != end; ++e)
                if (atomic_read(&e->refcnt) == 0)
                        goto found;
 
@@ -368,7 +371,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
        int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *)neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(addr, addr_len, ifidx);
+       int hash = addr_hash(d, addr, addr_len, ifidx);
 
        if (neigh->dev->flags & IFF_LOOPBACK)
                lport = netdev2pinfo(physdev)->tx_chan + 4;
@@ -481,7 +484,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
        int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
        int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(addr, addr_len, ifidx);
+       int hash = addr_hash(d, addr, addr_len, ifidx);
 
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
@@ -554,20 +557,30 @@ int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
        return write_l2e(adap, e, 0);
 }
 
-struct l2t_data *t4_init_l2t(void)
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
 {
+       unsigned int l2t_size;
        int i;
        struct l2t_data *d;
 
-       d = t4_alloc_mem(sizeof(*d));
+       if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+               return NULL;
+       l2t_size = l2t_end - l2t_start + 1;
+       if (l2t_size < L2T_MIN_HASH_BUCKETS)
+               return NULL;
+
+       d = t4_alloc_mem(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
        if (!d)
                return NULL;
 
+       d->l2t_start = l2t_start;
+       d->l2t_size = l2t_size;
+
        d->rover = d->l2tab;
-       atomic_set(&d->nfree, L2T_SIZE);
+       atomic_set(&d->nfree, l2t_size);
        rwlock_init(&d->lock);
 
-       for (i = 0; i < L2T_SIZE; ++i) {
+       for (i = 0; i < d->l2t_size; ++i) {
                d->l2tab[i].idx = i;
                d->l2tab[i].state = L2T_STATE_UNUSED;
                spin_lock_init(&d->l2tab[i].lock);
@@ -578,9 +591,9 @@ struct l2t_data *t4_init_l2t(void)
 
 static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
 {
-       struct l2t_entry *l2tab = seq->private;
+       struct l2t_data *d = seq->private;
 
-       return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+       return pos >= d->l2t_size ? NULL : &d->l2tab[pos];
 }
 
 static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
@@ -620,6 +633,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
                         "Ethernet address  VLAN/P LP State Users Port\n");
        else {
                char ip[60];
+               struct l2t_data *d = seq->private;
                struct l2t_entry *e = v;
 
                spin_lock_bh(&e->lock);
@@ -628,7 +642,7 @@ static int l2t_seq_show(struct seq_file *seq, void *v)
                else
                        sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
                seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
-                          e->idx, ip, e->dmac,
+                          e->idx + d->l2t_start, ip, e->dmac,
                           e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
                           l2e_state(e), atomic_read(&e->refcnt),
                           e->neigh ? e->neigh->dev->name : "");
@@ -652,7 +666,7 @@ static int l2t_seq_open(struct inode *inode, struct file *file)
                struct adapter *adap = inode->i_private;
                struct seq_file *seq = file->private_data;
 
-               seq->private = adap->l2t->l2tab;
+               seq->private = adap->l2t;
        }
        return rc;
 }
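
With the L2T no longer a fixed power-of-two array, the hashing above can no longer mask with L2T_HASH_MASK; instead it keeps IPv4 entries in the first half of whatever slice of the table this adapter owns and IPv6 entries in the second half, using a plain modulo, which is also why at least L2T_MIN_HASH_BUCKETS entries are required. A reduced sketch of that bucket layout; jhash_2words() is replaced by a trivial stand-in so the example compiles on its own:

    #include <stdio.h>

    /* Stand-in for jhash_2words(); only the bucket arithmetic matters here. */
    static unsigned int toy_hash(unsigned int key, unsigned int ifindex)
    {
            return key * 2654435761u + ifindex;
    }

    /* v4 keys land in [0, half), v6 keys in [half, size), as in the driver. */
    static unsigned int v4_bucket(unsigned int size, unsigned int key, int ifindex)
    {
            unsigned int half = size / 2;

            return toy_hash(key, ifindex) % half;
    }

    static unsigned int v6_bucket(unsigned int size, const unsigned int key[4],
                                  int ifindex)
    {
            unsigned int half = size / 2;
            unsigned int x = key[0] ^ key[1] ^ key[2] ^ key[3];

            return half + toy_hash(x, ifindex) % half;
    }

    int main(void)
    {
            unsigned int size = 64;   /* i.e. l2t_end - l2t_start + 1 */
            unsigned int v6key[4] = { 0x20010db8, 0, 0, 1 };

            printf("v4 -> bucket %u\n", v4_bucket(size, 0xc0a80001, 2));
            printf("v6 -> bucket %u\n", v6_bucket(size, v6key, 2));
            return 0;
    }

The modulo keeps the two address families in disjoint halves for any even table size, at the cost of a slightly more expensive hash than the old power-of-two mask.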
index a30126ce90cbabeaf50d7ed3fc5f1531c3831db9..b38dc526aad563a3b27b6b79e2e8d7c093b84ff0 100644 (file)
 #include <linux/if_ether.h>
 #include <linux/atomic.h>
 
+enum { L2T_SIZE = 4096 };     /* # of L2T entries */
+
+enum {
+       L2T_STATE_VALID,      /* entry is up to date */
+       L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+       L2T_STATE_RESOLVING,  /* entry needs address resolution */
+       L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+       L2T_STATE_NOARP,      /* Netdev down or removed*/
+
+       /* when state is one of the below the entry is not hashed */
+       L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+       L2T_STATE_UNUSED      /* entry not in use */
+};
+
 struct adapter;
 struct l2t_data;
 struct neighbour;
@@ -56,7 +70,7 @@ struct cpl_l2t_write_rpl;
  */
 struct l2t_entry {
        u16 state;                  /* entry state */
-       u16 idx;                    /* entry index */
+       u16 idx;                    /* entry index within in-memory table */
        u32 addr[4];                /* next hop IP or IPv6 address */
        int ifindex;                /* neighbor's net_device's ifindex */
        struct neighbour *neigh;    /* associated neighbour */
@@ -104,7 +118,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
 struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
 int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
                         u8 port, u8 *eth_addr);
-struct l2t_data *t4_init_l2t(void);
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
 void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
 
 extern const struct file_operations t4_l2t_fops;
index 942db078f33a6fa0332c627762695386f688aa07..d4248d74f5601b711c1d0c9d65923ec7acce627c 100644 (file)
@@ -1137,7 +1137,7 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       u32 wr_mid;
+       u32 wr_mid, ctrl0;
        u64 cntrl, *end;
        int qidx, credits;
        unsigned int flits, ndesc;
@@ -1274,9 +1274,15 @@ out_free:        dev_kfree_skb_any(skb);
 #endif /* CONFIG_CHELSIO_T4_FCOE */
        }
 
-       cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
-                          TXPKT_INTF_V(pi->tx_chan) |
-                          TXPKT_PF_V(adap->pf));
+       ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
+               TXPKT_PF_V(adap->pf);
+#ifdef CONFIG_CHELSIO_T4_DCB
+       if (is_t4(adap->params.chip))
+               ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
+       else
+               ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
+#endif
+       cpl->ctrl0 = htonl(ctrl0);
        cpl->pack = htons(0);
        cpl->len = htons(skb->len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
index 2b52aae7ec86d38b9e658e6e92d1f82ae3c6c879..800bd489dd751d14909a7054a7883ae73ebeed1c 100644 (file)
@@ -1345,9 +1345,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x5a80, 0x5a9c,
                0x5b94, 0x5bfc,
                0x5c10, 0x5ec0,
-               0x5ec8, 0x5ec8,
+               0x5ec8, 0x5ecc,
                0x6000, 0x6040,
-               0x6058, 0x6154,
+               0x6058, 0x615c,
                0x7700, 0x7798,
                0x77c0, 0x7880,
                0x78cc, 0x78fc,
@@ -1371,20 +1371,22 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x9f00, 0x9f6c,
                0x9f80, 0xa020,
                0xd004, 0xd03c,
+               0xd100, 0xd118,
+               0xd200, 0xd31c,
                0xdfc0, 0xdfe0,
                0xe000, 0xf008,
                0x11000, 0x11014,
                0x11048, 0x11110,
                0x11118, 0x1117c,
-               0x11190, 0x11260,
+               0x11190, 0x11264,
                0x11300, 0x1130c,
-               0x12000, 0x1205c,
+               0x12000, 0x1206c,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
-               0x19238, 0x192b8,
+               0x19238, 0x192bc,
                0x193f8, 0x19474,
                0x19490, 0x194cc,
                0x194f0, 0x194f8,
@@ -1466,7 +1468,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x30200, 0x30318,
                0x30400, 0x3052c,
                0x30540, 0x3061c,
-               0x30800, 0x3088c,
+               0x30800, 0x30890,
                0x308c0, 0x30908,
                0x30910, 0x309b8,
                0x30a00, 0x30a04,
@@ -1544,7 +1546,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
                0x34200, 0x34318,
                0x34400, 0x3452c,
                0x34540, 0x3461c,
-               0x34800, 0x3488c,
+               0x34800, 0x34890,
                0x348c0, 0x34908,
                0x34910, 0x349b8,
                0x34a00, 0x34a04,
@@ -3687,6 +3689,11 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
        return 0;
 }
 
+static unsigned int t4_use_ldst(struct adapter *adap)
+{
+       return (adap->flags & FW_OK) || !adap->use_bd;
+}
+
 /**
  *     t4_fw_tp_pio_rw - Access TP PIO through LDST
  *     @adap: the adapter
@@ -3730,7 +3737,7 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
  */
 void t4_read_rss_key(struct adapter *adap, u32 *key)
 {
-       if (adap->flags & FW_OK)
+       if (t4_use_ldst(adap))
                t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
        else
                t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3760,7 +3767,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
            (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
                rss_key_addr_cnt = 32;
 
-       if (adap->flags & FW_OK)
+       if (t4_use_ldst(adap))
                t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
        else
                t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
@@ -3789,7 +3796,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
                           u32 *valp)
 {
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, valp, 1,
                                TP_RSS_PF0_CONFIG_A + index, 1);
        else
@@ -3829,7 +3836,7 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
 
        /* Grab the VFL/VFH values ...
         */
-       if (adapter->flags & FW_OK) {
+       if (t4_use_ldst(adapter)) {
                t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
                t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
        } else {
@@ -3850,7 +3857,7 @@ u32 t4_read_rss_pf_map(struct adapter *adapter)
 {
        u32 pfmap;
 
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
        else
                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3868,7 +3875,7 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter)
 {
        u32 pfmask;
 
-       if (adapter->flags & FW_OK)
+       if (t4_use_ldst(adapter))
                t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
        else
                t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
@@ -3924,43 +3931,25 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
  */
 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
 {
-       /* T6 and later has 2 channels */
-       if (adap->params.arch.nchan == NCHAN) {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_cong_drops, 8,
-                                TP_MIB_TNL_CNG_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_tx_drops, 4,
-                                TP_MIB_TNL_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_vlan_drops, 4,
-                                TP_MIB_OFD_VLN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp6_in_errs, 4,
-                                TP_MIB_TCP_V6IN_ERR_0_A);
-       } else {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_cong_drops, 2,
-                                TP_MIB_TNL_CNG_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_chan_drops, 2,
-                                TP_MIB_OFD_CHN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->ofld_vlan_drops, 2,
-                                TP_MIB_OFD_VLN_DROP_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
-                                st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
-       }
+       int nchan = adap->params.arch.nchan;
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+                        st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
+
        t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
                         &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
 }
@@ -3974,16 +3963,13 @@ void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
  */
 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
 {
-       /* T6 and later has 2 channels */
-       if (adap->params.arch.nchan == NCHAN) {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-                                8, TP_MIB_CPL_IN_REQ_0_A);
-       } else {
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
-                                2, TP_MIB_CPL_IN_REQ_0_A);
-               t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
-                                2, TP_MIB_CPL_OUT_RSP_0_A);
-       }
+       int nchan = adap->params.arch.nchan;
+
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+                        nchan, TP_MIB_CPL_IN_REQ_0_A);
+       t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+                        nchan, TP_MIB_CPL_OUT_RSP_0_A);
+
 }
 
 /**
@@ -6294,7 +6280,7 @@ int t4_init_tp_params(struct adapter *adap)
        /* Cache the adapter's Compressed Filter Mode and global Incress
         * Configuration.
         */
-       if (adap->flags & FW_OK) {
+       if (t4_use_ldst(adap)) {
                t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
                                TP_VLAN_PRI_MAP_A, 1);
                t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
index c8488f430d197337d7fb81d62689e2c12c583292..640369df8b3a3cb155eb35c9d7a08009d0716da2 100644 (file)
@@ -47,7 +47,6 @@ enum {
        TCB_SIZE       = 128,   /* TCB size */
        NMTUS          = 16,    /* size of MTU table */
        NCCTRL_WIN     = 32,    /* # of congestion control windows */
-       L2T_SIZE       = 4096,  /* # of L2T entries */
        PM_NSTATS      = 5,     /* # of PM stats */
        MBOX_LEN       = 64,    /* mailbox size in bytes */
        TRACE_LEN      = 112,   /* length of trace data and mask */
index 132cb8fc0bf7167703bab62109e384281ba33836..b99144afd4ecc8958961acf2673c53bdcd8fa8df 100644 (file)
@@ -660,6 +660,9 @@ struct cpl_tx_pkt {
 #define TXPKT_OVLAN_IDX_S    12
 #define TXPKT_OVLAN_IDX_V(x) ((x) << TXPKT_OVLAN_IDX_S)
 
+#define TXPKT_T5_OVLAN_IDX_S   12
+#define TXPKT_T5_OVLAN_IDX_V(x)        ((x) << TXPKT_T5_OVLAN_IDX_S)
+
 #define TXPKT_INTF_S    16
 #define TXPKT_INTF_V(x) ((x) << TXPKT_INTF_S)
 
index d7ca106927b0d93ee68480c2e45c29879fe9e510..8353a6cbfcc21edd2dde363fafd06b202611cae4 100644 (file)
@@ -142,6 +142,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
        CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
        CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+       CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+       CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
        CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
        CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
@@ -155,6 +157,22 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
        CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+
+       /* T6 adapters:
+        */
+       CH_PCI_ID_TABLE_FENTRY(0x6001),
+       CH_PCI_ID_TABLE_FENTRY(0x6002),
+       CH_PCI_ID_TABLE_FENTRY(0x6003),
+       CH_PCI_ID_TABLE_FENTRY(0x6004),
+       CH_PCI_ID_TABLE_FENTRY(0x6005),
+       CH_PCI_ID_TABLE_FENTRY(0x6006),
+       CH_PCI_ID_TABLE_FENTRY(0x6007),
+       CH_PCI_ID_TABLE_FENTRY(0x6009),
+       CH_PCI_ID_TABLE_FENTRY(0x600d),
+       CH_PCI_ID_TABLE_FENTRY(0x6010),
+       CH_PCI_ID_TABLE_FENTRY(0x6011),
+       CH_PCI_ID_TABLE_FENTRY(0x6014),
+       CH_PCI_ID_TABLE_FENTRY(0x6015),
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index 375a825573b0edb546fda908606fc1f6c6367212..ed8a8f3501139fbb7b43f9bfaf8d0cc6683a6cd4 100644 (file)
 #define EGRTHRESHOLDPACKING_G(x) \
        (((x) >> EGRTHRESHOLDPACKING_S) & EGRTHRESHOLDPACKING_M)
 
+#define T6_EGRTHRESHOLDPACKING_S    16
+#define T6_EGRTHRESHOLDPACKING_M    0xffU
+#define T6_EGRTHRESHOLDPACKING_G(x) \
+       (((x) >> T6_EGRTHRESHOLDPACKING_S) & T6_EGRTHRESHOLDPACKING_M)
+
 #define SGE_TIMESTAMP_LO_A 0x1098
 #define SGE_TIMESTAMP_HI_A 0x109c
 
index ad53e5ad2acd05afa1b94c09f7e2c1be8f4599a6..fa3786a9d30ea95223ab078da5efce42530d81f2 100644 (file)
@@ -1898,7 +1898,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                rspq->unhandled_irqs++;
 
        val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
-       if (is_t4(rspq->adapter->params.chip)) {
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!rspq->bar2_addr)) {
                t4_write_reg(rspq->adapter,
                             T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                             val | INGRESSQID_V((u32)rspq->cntxt_id));
@@ -1998,10 +2001,13 @@ static unsigned int process_intrq(struct adapter *adapter)
        }
 
        val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
-       if (is_t4(adapter->params.chip))
+       /* If we don't have access to the new User GTS (T5+), use the old
+        * doorbell mechanism; otherwise use the new BAR2 mechanism.
+        */
+       if (unlikely(!intrq->bar2_addr)) {
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                             val | INGRESSQID_V(intrq->cntxt_id));
-       else {
+       } else {
                writel(val | INGRESSQID_V(intrq->bar2_qid),
                       intrq->bar2_addr + SGE_UDB_GTS);
                wmb();
@@ -2662,8 +2668,22 @@ int t4vf_sge_init(struct adapter *adapter)
         * give it more Free List entries.  (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
-       s->fl_starve_thres
-               = EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
+       switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
+       case CHELSIO_T4:
+               s->fl_starve_thres =
+                  EGRTHRESHOLD_G(sge_params->sge_congestion_control);
+               break;
+       case CHELSIO_T5:
+               s->fl_starve_thres =
+                  EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+               break;
+       case CHELSIO_T6:
+       default:
+               s->fl_starve_thres =
+                  T6_EGRTHRESHOLDPACKING_G(sge_params->sge_congestion_control);
+               break;
+       }
+       s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
 
        /*
         * Set up tasklet timers.
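
As the comment above the switch notes, the SGE reports its egress congestion
threshold in units of 2 free-list pointers, so whichever per-chip getter
extracts the field, the result is doubled and one is added so a queue sitting
exactly at the threshold still counts as starved. A quick worked example,
with a hypothetical extracted field value:

    #include <assert.h>

    int main(void)
    {
            /* A threshold field of 64, in units of 2 FL pointers, means
             * the queue is starved at or below 129 free-list entries.
             */
            unsigned int field = 64;  /* hypothetical extracted value */
            unsigned int fl_starve_thres = field * 2 + 1;

            assert(fl_starve_thres == 129);
            return 0;
    }
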
index f3f1601a76f37ffe7886da67427f6b79e8435321..f44a39c40642c147e03d79d2982031300e437d29 100644 (file)
@@ -224,7 +224,8 @@ static int enic_get_coalesce(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
-       ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
        ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
        if (rxcoal->use_adaptive_rx_coalesce)
                ecmd->use_adaptive_rx_coalesce = 1;
@@ -234,6 +235,53 @@ static int enic_get_coalesce(struct net_device *netdev,
        return 0;
 }
 
+static int enic_coalesce_valid(struct enic *enic,
+                              struct ethtool_coalesce *ec)
+{
+       u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
+       u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
+                                          ec->rx_coalesce_usecs_high);
+       u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
+                                         ec->rx_coalesce_usecs_low);
+
+       if (ec->rx_max_coalesced_frames         ||
+           ec->rx_coalesce_usecs_irq           ||
+           ec->rx_max_coalesced_frames_irq     ||
+           ec->tx_max_coalesced_frames         ||
+           ec->tx_coalesce_usecs_irq           ||
+           ec->tx_max_coalesced_frames_irq     ||
+           ec->stats_block_coalesce_usecs      ||
+           ec->use_adaptive_tx_coalesce        ||
+           ec->pkt_rate_low                    ||
+           ec->rx_max_coalesced_frames_low     ||
+           ec->tx_coalesce_usecs_low           ||
+           ec->tx_max_coalesced_frames_low     ||
+           ec->pkt_rate_high                   ||
+           ec->rx_max_coalesced_frames_high    ||
+           ec->tx_coalesce_usecs_high          ||
+           ec->tx_max_coalesced_frames_high    ||
+           ec->rate_sample_interval)
+               return -EINVAL;
+
+       if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
+           ec->tx_coalesce_usecs)
+               return -EINVAL;
+
+       if ((ec->tx_coalesce_usecs > coalesce_usecs_max)        ||
+           (ec->rx_coalesce_usecs > coalesce_usecs_max)        ||
+           (ec->rx_coalesce_usecs_low > coalesce_usecs_max)    ||
+           (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
+               netdev_info(enic->netdev, "ethtool_set_coalesce: adapter supports max coalesce value of %d. Setting max value.\n",
+                           coalesce_usecs_max);
+
+       if (ec->rx_coalesce_usecs_high &&
+           (rx_coalesce_usecs_high <
+            rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int enic_set_coalesce(struct net_device *netdev,
        struct ethtool_coalesce *ecmd)
 {
@@ -244,8 +292,12 @@ static int enic_set_coalesce(struct net_device *netdev,
        u32 rx_coalesce_usecs_high;
        u32 coalesce_usecs_max;
        unsigned int i, intr;
+       int ret;
        struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
 
+       ret = enic_coalesce_valid(enic, ecmd);
+       if (ret)
+               return ret;
        coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
        tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
                                  coalesce_usecs_max);
@@ -257,59 +309,24 @@ static int enic_set_coalesce(struct net_device *netdev,
        rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
                                       coalesce_usecs_max);
 
-       switch (vnic_dev_get_intr_mode(enic->vdev)) {
-       case VNIC_DEV_INTR_MODE_INTX:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-               if (ecmd->use_adaptive_rx_coalesce      ||
-                   ecmd->rx_coalesce_usecs_low         ||
-                   ecmd->rx_coalesce_usecs_high)
-                       return -EINVAL;
-
-               intr = enic_legacy_io_intr();
-               vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSI:
-               if (tx_coalesce_usecs != rx_coalesce_usecs)
-                       return -EINVAL;
-               if (ecmd->use_adaptive_rx_coalesce      ||
-                   ecmd->rx_coalesce_usecs_low         ||
-                   ecmd->rx_coalesce_usecs_high)
-                       return -EINVAL;
-
-               vnic_intr_coalescing_timer_set(&enic->intr[0],
-                       tx_coalesce_usecs);
-               break;
-       case VNIC_DEV_INTR_MODE_MSIX:
-               if (ecmd->rx_coalesce_usecs_high &&
-                   (rx_coalesce_usecs_high <
-                    rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-                               return -EINVAL;
-
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
-                               tx_coalesce_usecs);
-               }
-
-               rxcoal->use_adaptive_rx_coalesce =
-                                       !!ecmd->use_adaptive_rx_coalesce;
-               if (!rxcoal->use_adaptive_rx_coalesce)
-                       enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-
-               if (ecmd->rx_coalesce_usecs_high) {
-                       rxcoal->range_end = rx_coalesce_usecs_high;
-                       rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
-                       rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
-                                                       ENIC_AIC_LARGE_PKT_DIFF;
+                                                      tx_coalesce_usecs);
                }
-               break;
-       default:
-               break;
+               enic->tx_coalesce_usecs = tx_coalesce_usecs;
+       }
+       rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
+       if (!rxcoal->use_adaptive_rx_coalesce)
+               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+       if (ecmd->rx_coalesce_usecs_high) {
+               rxcoal->range_end = rx_coalesce_usecs_high;
+               rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+               rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+                                               ENIC_AIC_LARGE_PKT_DIFF;
        }
 
-       enic->tx_coalesce_usecs = tx_coalesce_usecs;
        enic->rx_coalesce_usecs = rx_coalesce_usecs;
 
        return 0;
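
The net effect of the restructuring above is a validate-then-apply flow:
enic_coalesce_valid() rejects any ethtool field the hardware cannot honor
(including TX coalescing outside MSI-X) before any device state changes,
after which accepted microsecond values are clamped to the adapter maximum
rather than rejected. A minimal sketch of that shape; all names here are
illustrative, with hw_max standing in for vnic_dev_get_intr_coal_timer_max():

    #include <errno.h>
    #include <stdint.h>

    /* Validate first, then clamp and apply; nothing is written to the
     * device if validation fails.
     */
    static int set_coalesce(uint32_t requested, uint32_t hw_max,
                            int supported, uint32_t *applied)
    {
            if (!supported && requested)
                    return -EINVAL;  /* reject unsupported fields up front */

            *applied = requested < hw_max ? requested : hw_max; /* min_t() */
            return 0;
    }
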
index 918a8e42139b1f8ba9d95a4a9a1300c014324db8..8f646e4e968b329ab53dcb70af3c733e7788661f 100644 (file)
@@ -1149,6 +1149,64 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
        return 0;
 }
 
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       u32 timer = cq->tobe_rx_coal_timeval;
+
+       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+       }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+       int index;
+       u32 timer;
+       u32 range_start;
+       u32 traffic;
+       u64 delta;
+       ktime_t now = ktime_get();
+
+       delta = ktime_us_delta(now, cq->prev_ts);
+       if (delta < ENIC_AIC_TS_BREAK)
+               return;
+       cq->prev_ts = now;
+
+       traffic = pkt_size_counter->large_pkt_bytes_cnt +
+                 pkt_size_counter->small_pkt_bytes_cnt;
+       /* The table takes Mbps
+        * traffic *= 8    => bits
+        * traffic *= (10^6 / delta)    => bps
+        * traffic /= 10^6     => Mbps
+        *
+        * Combining, traffic *= (8 / delta)
+        */
+
+       traffic <<= 3;
+       traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+               if (traffic < mod_table[index].rx_rate)
+                       break;
+       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+                     rx_coal->small_pkt_range_start :
+                     rx_coal->large_pkt_range_start;
+       timer = range_start + ((rx_coal->range_end - range_start) *
+                              mod_table[index].range_percent / 100);
+       /* Damping */
+       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+       pkt_size_counter->large_pkt_bytes_cnt = 0;
+       pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
 static int enic_poll(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
@@ -1199,6 +1257,11 @@ static int enic_poll(struct napi_struct *napi, int budget)
 
        if (err)
                rq_work_done = rq_work_to_do;
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               /* Call the function which refreshes the intr coalescing timer
+                * value based on the traffic.
+                */
+               enic_calc_int_moderation(enic, &enic->rq[0]);
 
        if (rq_work_done < rq_work_to_do) {
 
@@ -1207,70 +1270,14 @@ static int enic_poll(struct napi_struct *napi, int budget)
                 */
 
                napi_complete(napi);
+               if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+                       enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
 
        return rq_work_done;
 }
 
-static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
-       unsigned int intr = enic_msix_rq_intr(enic, rq->index);
-       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-       u32 timer = cq->tobe_rx_coal_timeval;
-
-       if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
-               vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
-               cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
-       }
-}
-
-static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
-{
-       struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
-       struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-       struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
-       int index;
-       u32 timer;
-       u32 range_start;
-       u32 traffic;
-       u64 delta;
-       ktime_t now = ktime_get();
-
-       delta = ktime_us_delta(now, cq->prev_ts);
-       if (delta < ENIC_AIC_TS_BREAK)
-               return;
-       cq->prev_ts = now;
-
-       traffic = pkt_size_counter->large_pkt_bytes_cnt +
-                 pkt_size_counter->small_pkt_bytes_cnt;
-       /* The table takes Mbps
-        * traffic *= 8    => bits
-        * traffic *= (10^6 / delta)    => bps
-        * traffic /= 10^6     => Mbps
-        *
-        * Combining, traffic *= (8 / delta)
-        */
-
-       traffic <<= 3;
-       traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
-
-       for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
-               if (traffic < mod_table[index].rx_rate)
-                       break;
-       range_start = (pkt_size_counter->small_pkt_bytes_cnt >
-                      pkt_size_counter->large_pkt_bytes_cnt << 1) ?
-                     rx_coal->small_pkt_range_start :
-                     rx_coal->large_pkt_range_start;
-       timer = range_start + ((rx_coal->range_end - range_start) *
-                              mod_table[index].range_percent / 100);
-       /* Damping */
-       cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
-
-       pkt_size_counter->large_pkt_bytes_cnt = 0;
-       pkt_size_counter->small_pkt_bytes_cnt = 0;
-}
-
 #ifdef CONFIG_RFS_ACCEL
 static void enic_free_rx_cpu_rmap(struct enic *enic)
 {
@@ -1407,10 +1414,8 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        if (err)
                work_done = work_to_do;
        if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-               /* Call the function which refreshes
-                * the intr coalescing timer value based on
-                * the traffic.  This is supported only in
-                * the case of MSI-x mode
+               /* Call the function which refreshes the intr coalescing timer
+                * value based on the traffic.
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
@@ -1569,12 +1574,6 @@ static void enic_set_rx_coal_setting(struct enic *enic)
        int index = -1;
        struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
 
-       /* If intr mode is not MSIX, do not do adaptive coalescing */
-       if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
-               netdev_info(enic->netdev, "INTR mode is not MSIX, Not initializing adaptive coalescing");
-               return;
-       }
-
        /* 1. Read the link speed from fw
         * 2. Pick the default range for the speed
         * 3. Update it in enic->rx_coalesce_setting
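
The moderation helper's rate arithmetic is worth spelling out once: bytes
accumulated over a delta measured in microseconds, multiplied by 8 and
divided by delta, yield megabits per second directly, because the 10^6 from
per-microsecond-to-per-second cancels the 10^6 from bits-to-megabits. A
worked example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t bytes = 1250000; /* bytes seen in the sampling window */
            uint64_t delta = 1000;    /* window length in microseconds */
            uint64_t mbps  = (bytes << 3) / delta; /* bytes*8/usecs == Mbps */

            assert(mbps == 10000);    /* 1.25 MB per ms is a 10 Gbps flow */
            return 0;
    }
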
index d1017509b08ac1e171a12a89770373a5057c5d64..f7b42483921c5847a883a286d28f7700e10b1d28 100644 (file)
@@ -604,19 +604,7 @@ static struct pci_driver pci_driver = {
        .probe          = ec_bhf_probe,
        .remove         = ec_bhf_remove,
 };
-
-static int __init ec_bhf_init(void)
-{
-       return pci_register_driver(&pci_driver);
-}
-
-static void __exit ec_bhf_exit(void)
-{
-       pci_unregister_driver(&pci_driver);
-}
-
-module_init(ec_bhf_init);
-module_exit(ec_bhf_exit);
+module_pci_driver(pci_driver);
 
 module_param(polling_frequency, long, S_IRUGO);
 MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns");
index 8d12b41b3b1990af468da5a38c4759fc005ba3d3..0a27805cbbbd0e14f2988ffec5f207909628d65b 100644 (file)
@@ -37,7 +37,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.6.0.2"
+#define DRV_VER                        "10.6.0.3"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
 
 #define MAX_VFS                        30 /* Max VFs supported by BE3 FW */
 #define FW_VER_LEN             32
+#define        CNTL_SERIAL_NUM_WORDS   8  /* Controller serial number words */
+#define        CNTL_SERIAL_NUM_WORD_SZ (sizeof(u16)) /* Byte-sz of serial num word */
 
 #define        RSS_INDIR_TABLE_LEN     128
 #define RSS_HASH_KEY_LEN       40
@@ -228,6 +230,7 @@ struct be_mcc_obj {
 struct be_tx_stats {
        u64 tx_bytes;
        u64 tx_pkts;
+       u64 tx_vxlan_offload_pkts;
        u64 tx_reqs;
        u64 tx_compl;
        ulong tx_jiffies;
@@ -275,6 +278,7 @@ struct be_rx_page_info {
 struct be_rx_stats {
        u64 rx_bytes;
        u64 rx_pkts;
+       u64 rx_vxlan_offload_pkts;
        u32 rx_drops_no_skbs;   /* skb allocation errors */
        u32 rx_drops_no_frags;  /* HW has no fetched frags */
        u32 rx_post_fail;       /* page post alloc failures */
@@ -590,6 +594,7 @@ struct be_adapter {
        struct rss_info rss_info;
        /* Filters for packets that need to be sent to BMC */
        u32 bmc_filt_mask;
+       u16 serial_num[CNTL_SERIAL_NUM_WORDS];
 };
 
 #define be_physfn(adapter)             (!adapter->virtfn)
index 9eac3227d2cabc15c2d21a4baafafc3761372560..3be1fbdcdd0215cbd6589001b3a11c2091d309c2 100644 (file)
@@ -88,19 +88,21 @@ static inline void *embedded_payload(struct be_mcc_wrb *wrb)
        return wrb->payload.embedded_payload;
 }
 
-static void be_mcc_notify(struct be_adapter *adapter)
+static int be_mcc_notify(struct be_adapter *adapter)
 {
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;
 
        if (be_check_error(adapter, BE_ERROR_ANY))
-               return;
+               return -EIO;
 
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
 
        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
+
+       return 0;
 }
 
 /* To check if valid bit is set, check the entire word as we don't know
@@ -170,6 +172,12 @@ static void be_async_cmd_process(struct be_adapter *adapter,
                return;
        }
 
+       if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
+           subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+               complete(&adapter->et_cmd_compl);
+               return;
+       }
+
        if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
             opcode == OPCODE_COMMON_WRITE_OBJECT) &&
            subsystem == CMD_SUBSYSTEM_COMMON) {
@@ -541,7 +549,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
 
        resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto out;
 
        status = be_mcc_wait_compl(adapter);
        if (status == -EIO)
@@ -1547,7 +1557,10 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
        else
                hdr->version = 2;
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
+
        adapter->stats_cmd_sent = true;
 
 err:
@@ -1583,7 +1596,10 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
+
        adapter->stats_cmd_sent = true;
 
 err:
@@ -1687,8 +1703,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
                               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
                               sizeof(*req), wrb, NULL);
 
-       be_mcc_notify(adapter);
-
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1860,7 +1875,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
                                cpu_to_le32(set_eqd[i].delay_multiplier);
        }
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -1953,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }
 
-       status = be_mcc_notify_wait(adapter);
+       status = be_mcc_notify(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2320,7 +2335,10 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                                sizeof(struct lancer_cmd_req_write_object)));
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
        spin_unlock_bh(&adapter->mcc_lock);
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2491,7 +2509,10 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
        spin_unlock_bh(&adapter->mcc_lock);
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
@@ -2585,7 +2606,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
-               goto err;
+               goto err_unlock;
        }
 
        req = embedded_payload(wrb);
@@ -2599,8 +2620,19 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        req->loopback_type = loopback_type;
        req->loopback_state = enable;
 
-       status = be_mcc_notify_wait(adapter);
-err:
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err_unlock;
+
+       spin_unlock_bh(&adapter->mcc_lock);
+
+       if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+                                        msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
+               status = -ETIMEDOUT;
+
+       return status;
+
+err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
@@ -2636,7 +2668,9 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);
 
-       be_mcc_notify(adapter);
+       status = be_mcc_notify(adapter);
+       if (status)
+               goto err;
 
        spin_unlock_bh(&adapter->mcc_lock);
 
@@ -2818,10 +2852,11 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cntl_attribs *req;
        struct be_cmd_resp_cntl_attribs *resp;
-       int status;
+       int status, i;
        int payload_len = max(sizeof(*req), sizeof(*resp));
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;
+       u32 *serial_num;
 
        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;
@@ -2852,6 +2887,10 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
+               serial_num = attribs->hba_attribs.controller_serial_number;
+               for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
+                       adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
+                               (BIT_MASK(16) - 1);
        }
 
 err:
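
The masking in the serial-number loop just above keeps the low 16 bits of
each little-endian firmware word, since BIT_MASK(16) - 1 is 0xffff. With a
hypothetical firmware word:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t fw_word = 0x00125678;       /* hypothetical, after le32_to_cpu */
            uint16_t serial  = fw_word & 0xffff; /* BIT_MASK(16) - 1 */

            assert(serial == 0x5678);
            return 0;
    }
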
index 2716e6f30d9a0949633b40dc9864196c7465fa3a..36d835bd5f3c06f86020e4c192c02cf074bf2340 100644 (file)
@@ -1495,6 +1495,8 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
 #define BE_PME_D3COLD_CAP              0x80
 
 /********************** LoopBack test *********************/
+#define SET_LB_MODE_TIMEOUT            12000
+
 struct be_cmd_req_loopback_test {
        struct be_cmd_req_hdr hdr;
        u32 loopback_type;
@@ -1635,10 +1637,12 @@ struct be_cmd_req_set_qos {
 struct mgmt_hba_attribs {
        u32 rsvd0[24];
        u8 controller_model_number[32];
-       u32 rsvd1[79];
-       u8 rsvd2[3];
+       u32 rsvd1[16];
+       u32 controller_serial_number[8];
+       u32 rsvd2[55];
+       u8 rsvd3[3];
        u8 phy_port;
-       u32 rsvd3[13];
+       u32 rsvd4[13];
 } __packed;
 
 struct mgmt_controller_attrib {
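
The reshuffle of mgmt_hba_attribs is offset-neutral: the old rsvd1[79] is
split into rsvd1[16] + controller_serial_number[8] + rsvd2[55], and
16 + 8 + 55 == 79 u32 words, so phy_port and everything after it keep their
old offsets. That invariant can be pinned down with a static assertion:

    /* 16 + 8 + 55 reserved/serial u32s replace the old 79, so the
     * struct layout after them is unchanged.
     */
    _Static_assert(16 + 8 + 55 == 79, "mgmt_hba_attribs layout preserved");
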
@@ -1758,6 +1762,7 @@ struct be_cmd_req_set_mac_list {
 /*********************** HSW Config ***********************/
 #define PORT_FWD_TYPE_VEPA             0x3
 #define PORT_FWD_TYPE_VEB              0x2
+#define PORT_FWD_TYPE_PASSTHRU         0x1
 
 #define ENABLE_MAC_SPOOFCHK            0x2
 #define DISABLE_MAC_SPOOFCHK           0x3
index b2476dbfd103120affb5e216a31d304dda570a67..2c9ed1710ba6f4c16d8a3d800602b6d2337abb59 100644 (file)
@@ -138,6 +138,7 @@ static const struct be_ethtool_stat et_stats[] = {
 static const struct be_ethtool_stat et_rx_stats[] = {
        {DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
        {DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
+       {DRVSTAT_RX_INFO(rx_vxlan_offload_pkts)},
        {DRVSTAT_RX_INFO(rx_compl)},
        {DRVSTAT_RX_INFO(rx_compl_err)},
        {DRVSTAT_RX_INFO(rx_mcast_pkts)},
@@ -190,6 +191,7 @@ static const struct be_ethtool_stat et_tx_stats[] = {
        {DRVSTAT_TX_INFO(tx_internal_parity_err)},
        {DRVSTAT_TX_INFO(tx_bytes)},
        {DRVSTAT_TX_INFO(tx_pkts)},
+       {DRVSTAT_TX_INFO(tx_vxlan_offload_pkts)},
        /* Number of skbs queued for transmission by the driver */
        {DRVSTAT_TX_INFO(tx_reqs)},
        /* Number of times the TX queue was stopped due to lack
@@ -847,10 +849,21 @@ err:
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
                            u64 *status)
 {
-       be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
+       int ret;
+
+       ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+                                 loopback_type, 1);
+       if (ret)
+               return ret;
+
        *status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
                                       loopback_type, 1500, 2, 0xabc);
-       be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
+
+       ret = be_cmd_set_loopback(adapter, adapter->hba_port_num,
+                                 BE_NO_LOOPBACK, 1);
+       if (ret)
+               return ret;
+
        return *status;
 }
 
index 6f642426308c67399eac3abdb20ae6160ce41d2a..d86bc5d5224627a812ba0a430c21f7a4f23513b3 100644 (file)
@@ -677,11 +677,14 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
 static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
 {
        struct be_tx_stats *stats = tx_stats(txo);
+       u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
 
        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_bytes += skb->len;
-       stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
+       stats->tx_pkts += tx_pkts;
+       if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+               stats->tx_vxlan_offload_pkts += tx_pkts;
        u64_stats_update_end(&stats->sync);
 }
 
@@ -1254,7 +1257,7 @@ static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
        if (is_udp_pkt((*skb))) {
                struct udphdr *udp = udp_hdr((*skb));
 
-               switch (udp->dest) {
+               switch (ntohs(udp->dest)) {
                case DHCP_CLIENT_PORT:
                        os2bmc = is_dhcp_client_filt_enabled(adapter);
                        goto done;
@@ -1957,6 +1960,8 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
+       if (rxcp->tunneled)
+               stats->rx_vxlan_offload_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
@@ -3529,15 +3534,15 @@ err:
 
 static int be_setup_wol(struct be_adapter *adapter, bool enable)
 {
+       struct device *dev = &adapter->pdev->dev;
        struct be_dma_mem cmd;
-       int status = 0;
        u8 mac[ETH_ALEN];
+       int status;
 
        eth_zero_addr(mac);
 
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
-       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
-                                    GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3546,24 +3551,18 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
                                                PCICFG_PM_CONTROL_OFFSET,
                                                PCICFG_PM_CONTROL_MASK);
                if (status) {
-                       dev_err(&adapter->pdev->dev,
-                               "Could not enable Wake-on-lan\n");
-                       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
-                                         cmd.dma);
-                       return status;
+                       dev_err(dev, "Could not enable Wake-on-lan\n");
+                       goto err;
                }
-               status = be_cmd_enable_magic_wol(adapter,
-                                                adapter->netdev->dev_addr,
-                                                &cmd);
-               pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
-               pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
        } else {
-               status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
-               pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
-               pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
+               ether_addr_copy(mac, adapter->netdev->dev_addr);
        }
 
-       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+       status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+       pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+       pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+err:
+       dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -4924,7 +4923,7 @@ static bool be_check_ufi_compatibility(struct be_adapter *adapter,
 {
        if (!fhdr) {
                dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
-               return -1;
+               return false;
        }
 
        /* First letter of the build version is used to identify
@@ -5079,9 +5078,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        int status = 0;
        u8 hsw_mode;
 
-       if (!sriov_enabled(adapter))
-               return 0;
-
        /* BE and Lancer chips support VEB mode only */
        if (BEx_chip(adapter) || lancer_chip(adapter)) {
                hsw_mode = PORT_FWD_TYPE_VEB;
@@ -5091,6 +5087,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                               NULL);
                if (status)
                        return 0;
+
+               if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
+                       return 0;
        }
 
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
@@ -5225,6 +5224,27 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
 }
 #endif
 
+static int be_get_phys_port_id(struct net_device *dev,
+                              struct netdev_phys_item_id *ppid)
+{
+       int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
+       struct be_adapter *adapter = netdev_priv(dev);
+       u8 *id;
+
+       if (MAX_PHYS_ITEM_ID_LEN < id_len)
+               return -ENOSPC;
+
+       ppid->id[0] = adapter->hba_port_num + 1;
+       id = &ppid->id[1];
+       for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
+            i--, id += CNTL_SERIAL_NUM_WORD_SZ)
+               memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
+
+       ppid->id_len = id_len;
+
+       return 0;
+}
+
 static const struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
@@ -5255,6 +5275,7 @@ static const struct net_device_ops be_netdev_ops = {
        .ndo_del_vxlan_port     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
 #endif
+       .ndo_get_phys_port_id   = be_get_phys_port_id,
 };
 
 static void be_netdev_init(struct net_device *netdev)
@@ -5813,7 +5834,6 @@ static int be_pci_resume(struct pci_dev *pdev)
        if (status)
                return status;
 
-       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
        status = be_resume(adapter);
@@ -5893,7 +5913,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
 
        pci_set_master(pdev);
-       pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
 
        /* Check if card is ok and fw is ready */
index f457a23d0bfbd4149332d9bb93506891670ca36d..1543cf0e8ef647c92f1748e474833cf55300a490 100644 (file)
@@ -506,12 +506,6 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
                break;
 
        default:
-               /*
-                * register RXMTRL must be set in order to do V1 packets,
-                * therefore it is not possible to time stamp both V1 Sync and
-                * Delay_Req messages and hardware does not support
-                * timestamping all packets => return error
-                */
                fep->hwts_rx_en = 1;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
index 2b7610f341b09f4ff293f9916235030487fb2a80..087ffcdc48a312d365ffb24ee4f7c16ddcf18edb 100644 (file)
 
 #define TX_TIMEOUT      (1*HZ)
 
-const char gfar_driver_version[] = "1.3";
+const char gfar_driver_version[] = "2.0";
 
 static int gfar_enet_open(struct net_device *dev);
 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-                                   dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+                               int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                              int amount_pull, struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +168,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
        bdp->lstatus = cpu_to_be32(lstatus);
 }
 
-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
        struct gfar_private *priv = netdev_priv(ndev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        struct gfar_priv_tx_q *tx_queue = NULL;
        struct gfar_priv_rx_q *rx_queue = NULL;
        struct txbd8 *txbdp;
-       struct rxbd8 *rxbdp;
        u32 __iomem *rfbptr;
        int i, j;
-       dma_addr_t bufaddr;
 
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
@@ -207,40 +204,26 @@ static int gfar_init_bds(struct net_device *ndev)
        rfbptr = &regs->rfbptr0;
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               rx_queue->cur_rx = rx_queue->rx_bd_base;
-               rx_queue->skb_currx = 0;
-               rxbdp = rx_queue->rx_bd_base;
 
-               for (j = 0; j < rx_queue->rx_ring_size; j++) {
-                       struct sk_buff *skb = rx_queue->rx_skbuff[j];
+               rx_queue->next_to_clean = 0;
+               rx_queue->next_to_use = 0;
+               rx_queue->next_to_alloc = 0;
 
-                       if (skb) {
-                               bufaddr = be32_to_cpu(rxbdp->bufPtr);
-                       } else {
-                               skb = gfar_new_skb(ndev, &bufaddr);
-                               if (!skb) {
-                                       netdev_err(ndev, "Can't allocate RX buffers\n");
-                                       return -ENOMEM;
-                               }
-                               rx_queue->rx_skbuff[j] = skb;
-                       }
-
-                       gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-                       rxbdp++;
-               }
+               /* make sure next_to_clean != next_to_use after this
+                * by leaving at least 1 unused descriptor
+                */
+               gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));
 
                rx_queue->rfbptr = rfbptr;
                rfbptr += 2;
        }
-
-       return 0;
 }
 
 static int gfar_alloc_skb_resources(struct net_device *ndev)
 {
        void *vaddr;
        dma_addr_t addr;
-       int i, j, k;
+       int i, j;
        struct gfar_private *priv = netdev_priv(ndev);
        struct device *dev = priv->dev;
        struct gfar_priv_tx_q *tx_queue = NULL;
@@ -279,7 +262,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
                rx_queue = priv->rx_queue[i];
                rx_queue->rx_bd_base = vaddr;
                rx_queue->rx_bd_dma_base = addr;
-               rx_queue->dev = ndev;
+               rx_queue->ndev = ndev;
+               rx_queue->dev = dev;
                addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
        }
@@ -294,25 +278,20 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
                if (!tx_queue->tx_skbuff)
                        goto cleanup;
 
-               for (k = 0; k < tx_queue->tx_ring_size; k++)
-                       tx_queue->tx_skbuff[k] = NULL;
+               for (j = 0; j < tx_queue->tx_ring_size; j++)
+                       tx_queue->tx_skbuff[j] = NULL;
        }
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               rx_queue->rx_skbuff =
-                       kmalloc_array(rx_queue->rx_ring_size,
-                                     sizeof(*rx_queue->rx_skbuff),
-                                     GFP_KERNEL);
-               if (!rx_queue->rx_skbuff)
+               rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
+                                           sizeof(*rx_queue->rx_buff),
+                                           GFP_KERNEL);
+               if (!rx_queue->rx_buff)
                        goto cleanup;
-
-               for (j = 0; j < rx_queue->rx_ring_size; j++)
-                       rx_queue->rx_skbuff[j] = NULL;
        }
 
-       if (gfar_init_bds(ndev))
-               goto cleanup;
+       gfar_init_bds(ndev);
 
        return 0;
 
@@ -354,10 +333,8 @@ static void gfar_init_rqprm(struct gfar_private *priv)
        }
 }
 
-static void gfar_rx_buff_size_config(struct gfar_private *priv)
+static void gfar_rx_offload_en(struct gfar_private *priv)
 {
-       int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
-
        /* set this when rx hw offload (TOE) functions are being used */
        priv->uses_rxfcb = 0;
 
@@ -366,16 +343,6 @@ static void gfar_rx_buff_size_config(struct gfar_private *priv)
 
        if (priv->hwts_rx_en)
                priv->uses_rxfcb = 1;
-
-       if (priv->uses_rxfcb)
-               frame_size += GMAC_FCB_LEN;
-
-       frame_size += priv->padding;
-
-       frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
-                    INCREMENTAL_BUFFER_SIZE;
-
-       priv->rx_buffer_size = frame_size;
 }
 
 static void gfar_mac_rx_config(struct gfar_private *priv)
@@ -593,9 +560,8 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)
                if (!priv->rx_queue[i])
                        return -ENOMEM;
 
-               priv->rx_queue[i]->rx_skbuff = NULL;
                priv->rx_queue[i]->qindex = i;
-               priv->rx_queue[i]->dev = priv->ndev;
+               priv->rx_queue[i]->ndev = priv->ndev;
        }
        return 0;
 }
@@ -1187,12 +1153,11 @@ void gfar_mac_reset(struct gfar_private *priv)
 
        udelay(3);
 
-       /* Compute rx_buff_size based on config flags */
-       gfar_rx_buff_size_config(priv);
+       gfar_rx_offload_en(priv);
 
        /* Initialize the max receive frame/buffer lengths */
-       gfar_write(&regs->maxfrm, priv->rx_buffer_size);
-       gfar_write(&regs->mrblr, priv->rx_buffer_size);
+       gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
+       gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
 
        /* Initialize the Minimum Frame Length Register */
        gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
@@ -1200,12 +1165,11 @@ void gfar_mac_reset(struct gfar_private *priv)
        /* Initialize MACCFG2. */
        tempval = MACCFG2_INIT_SETTINGS;
 
-       /* If the mtu is larger than the max size for standard
-        * ethernet frames (ie, a jumbo frame), then set maccfg2
-        * to allow huge frames, and to check the length
+       /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
+        * are marked as truncated.  Avoid this by MACCFG2[Huge Frame]=1,
+        * and by checking RxBD[LG] and discarding larger than MAXFRM.
         */
-       if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
-           gfar_has_errata(priv, GFAR_ERRATA_74))
+       if (gfar_has_errata(priv, GFAR_ERRATA_74))
                tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
 
        gfar_write(&regs->maccfg2, tempval);
@@ -1415,8 +1379,6 @@ static int gfar_probe(struct platform_device *ofdev)
            priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
                dev->needed_headroom = GMAC_FCB_LEN;
 
-       priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
-
        /* Initializing some of the rx/tx queue level parameters */
        for (i = 0; i < priv->num_tx_queues; i++) {
                priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
@@ -1599,10 +1561,7 @@ static int gfar_restore(struct device *dev)
                return 0;
        }
 
-       if (gfar_init_bds(ndev)) {
-               free_skb_resources(priv);
-               return -ENOMEM;
-       }
+       gfar_init_bds(ndev);
 
        gfar_mac_reset(priv);
 
@@ -1893,26 +1852,32 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
-       struct rxbd8 *rxbdp;
-       struct gfar_private *priv = netdev_priv(rx_queue->dev);
        int i;
 
-       rxbdp = rx_queue->rx_bd_base;
+       struct rxbd8 *rxbdp = rx_queue->rx_bd_base;
+
+       if (rx_queue->skb)
+               dev_kfree_skb(rx_queue->skb);
 
        for (i = 0; i < rx_queue->rx_ring_size; i++) {
-               if (rx_queue->rx_skbuff[i]) {
-                       dma_unmap_single(priv->dev, be32_to_cpu(rxbdp->bufPtr),
-                                        priv->rx_buffer_size,
-                                        DMA_FROM_DEVICE);
-                       dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
-                       rx_queue->rx_skbuff[i] = NULL;
-               }
+               struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];
+
                rxbdp->lstatus = 0;
                rxbdp->bufPtr = 0;
                rxbdp++;
+
+               if (!rxb->page)
+                       continue;
+
+               dma_unmap_single(rx_queue->dev, rxb->dma,
+                                PAGE_SIZE, DMA_FROM_DEVICE);
+               __free_page(rxb->page);
+
+               rxb->page = NULL;
        }
-       kfree(rx_queue->rx_skbuff);
-       rx_queue->rx_skbuff = NULL;
+
+       kfree(rx_queue->rx_buff);
+       rx_queue->rx_buff = NULL;
 }
 
 /* If there are any tx skbs or rx skbs still around, free them.
@@ -1937,7 +1902,7 @@ static void free_skb_resources(struct gfar_private *priv)
 
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if (rx_queue->rx_skbuff)
+               if (rx_queue->rx_buff)
                        free_skb_rx_queue(rx_queue);
        }
 
@@ -2495,7 +2460,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
        struct gfar_private *priv = netdev_priv(dev);
        int frame_size = new_mtu + ETH_HLEN;
 
-       if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
+       if ((frame_size < 64) || (frame_size > GFAR_JUMBO_FRAME_SIZE)) {
                netif_err(priv, drv, dev, "Invalid MTU setting\n");
                return -EINVAL;
        }
@@ -2549,15 +2514,6 @@ static void gfar_timeout(struct net_device *dev)
        schedule_work(&priv->reset_task);
 }
 
-static void gfar_align_skb(struct sk_buff *skb)
-{
-       /* We need the data buffer to be aligned properly.  We will reserve
-        * as many bytes as needed to align the data properly
-        */
-       skb_reserve(skb, RXBUF_ALIGNMENT -
-                   (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
-}
-
 /* Interrupt Handler for Transmit complete */
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 {
@@ -2615,7 +2571,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                        struct skb_shared_hwtstamps shhwtstamps;
-                       u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+                       u64 *ns = (u64 *)(((uintptr_t)skb->data + 0x10) &
+                                         ~0x7UL);
 
                        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
                        shhwtstamps.hwtstamp = ns_to_ktime(*ns);
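
The rewritten cast deserves a note: the hardware drops the TX timestamp
16 bytes into the buffer, aligned down to an 8-byte boundary, and masking
with ~0x7 clears the low three address bits; using uintptr_t instead of u32
keeps the round-trip through an integer safe on 64-bit systems. A worked
example with a hypothetical buffer address:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uintptr_t data = 0x1003;  /* hypothetical skb->data */
            uintptr_t ts   = (data + 0x10) & ~(uintptr_t)0x7;

            assert(ts == 0x1010);     /* 0x1013 rounded down to 8 bytes */
            return 0;
    }
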
@@ -2664,49 +2621,85 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
        netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }
 
-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct sk_buff *skb;
+       struct page *page;
+       dma_addr_t addr;
 
-       skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
-       if (!skb)
-               return NULL;
+       page = dev_alloc_page();
+       if (unlikely(!page))
+               return false;
 
-       gfar_align_skb(skb);
+       addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(rxq->dev, addr))) {
+               __free_page(page);
 
-       return skb;
+               return false;
+       }
+
+       rxb->dma = addr;
+       rxb->page = page;
+       rxb->page_offset = 0;
+
+       return true;
 }
 
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct sk_buff *skb;
-       dma_addr_t addr;
+       struct gfar_private *priv = netdev_priv(rx_queue->ndev);
+       struct gfar_extra_stats *estats = &priv->extra_stats;
 
-       skb = gfar_alloc_skb(dev);
-       if (!skb)
-               return NULL;
+       netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
+       atomic64_inc(&estats->rx_alloc_err);
+}
 
-       addr = dma_map_single(priv->dev, skb->data,
-                             priv->rx_buffer_size, DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(priv->dev, addr))) {
-               dev_kfree_skb_any(skb);
-               return NULL;
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+                               int alloc_cnt)
+{
+       struct rxbd8 *bdp;
+       struct gfar_rx_buff *rxb;
+       int i;
+
+       i = rx_queue->next_to_use;
+       bdp = &rx_queue->rx_bd_base[i];
+       rxb = &rx_queue->rx_buff[i];
+
+       while (alloc_cnt--) {
+               /* try reuse page */
+               if (unlikely(!rxb->page)) {
+                       if (unlikely(!gfar_new_page(rx_queue, rxb))) {
+                               gfar_rx_alloc_err(rx_queue);
+                               break;
+                       }
+               }
+
+               /* Setup the new RxBD */
+               gfar_init_rxbdp(rx_queue, bdp,
+                               rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);
+
+               /* Update to the next pointer */
+               bdp++;
+               rxb++;
+
+               if (unlikely(++i == rx_queue->rx_ring_size)) {
+                       i = 0;
+                       bdp = rx_queue->rx_bd_base;
+                       rxb = rx_queue->rx_buff;
+               }
        }
 
-       *bufaddr = addr;
-       return skb;
+       rx_queue->next_to_use = i;
+       rx_queue->next_to_alloc = i;
 }
 
-static inline void count_errors(unsigned short status, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
 {
-       struct gfar_private *priv = netdev_priv(dev);
-       struct net_device_stats *stats = &dev->stats;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct net_device_stats *stats = &ndev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;
 
        /* If the packet was truncated, none of the other errors matter */
-       if (status & RXBD_TRUNCATED) {
+       if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
                stats->rx_length_errors++;
 
                atomic64_inc(&estats->rx_trunc);
@@ -2714,25 +2707,25 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
                return;
        }
        /* Count the errors, if there were any */
-       if (status & (RXBD_LARGE | RXBD_SHORT)) {
+       if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;
 
-               if (status & RXBD_LARGE)
+               if (lstatus & BD_LFLAG(RXBD_LARGE))
                        atomic64_inc(&estats->rx_large);
                else
                        atomic64_inc(&estats->rx_short);
        }
-       if (status & RXBD_NONOCTET) {
+       if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
                stats->rx_frame_errors++;
                atomic64_inc(&estats->rx_nonoctet);
        }
-       if (status & RXBD_CRCERR) {
+       if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
                atomic64_inc(&estats->rx_crcerr);
                stats->rx_crc_errors++;
        }
-       if (status & RXBD_OVERRUN) {
+       if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
                atomic64_inc(&estats->rx_overrun);
-               stats->rx_crc_errors++;
+               stats->rx_over_errors++;
        }
 }
 
@@ -2783,6 +2776,93 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
        return IRQ_HANDLED;
 }
 
+static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
+                            struct sk_buff *skb, bool first)
+{
+       unsigned int size = lstatus & BD_LENGTH_MASK;
+       struct page *page = rxb->page;
+
+       /* Remove the FCS from the packet length */
+       if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+               size -= ETH_FCS_LEN;
+
+       if (likely(first))
+               skb_put(skb, size);
+       else
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                               rxb->page_offset + RXBUF_ALIGNMENT,
+                               size, GFAR_RXB_TRUESIZE);
+
+       /* try reuse page */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* change offset to the other half */
+       rxb->page_offset ^= GFAR_RXB_TRUESIZE;
+
+       atomic_inc(&page->_count);
+
+       return true;
+}
+
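
gfar_add_rx_frag() implements half-page buffer recycling: each RX buffer is
one half of a page, and when the driver holds the only remaining reference
it flips page_offset to the other half and takes a fresh reference, so the
same page ping-pongs between the stack and the RX ring without reallocation.
A minimal sketch of that flip-flop, using plain fields as stand-ins for
struct page and page_count():

    #include <stdbool.h>

    #define HALF_PAGE 2048  /* GFAR_RXB_TRUESIZE: two buffers per 4K page */

    struct rx_buff {
            void        *page;        /* stand-in for struct page * */
            unsigned int page_offset; /* 0 or HALF_PAGE */
            int          refcount;    /* stand-in for page_count() */
    };

    /* If nobody else holds the page, hand the current half to the stack,
     * switch to the other half, and keep a reference for the ring.
     */
    static bool try_reuse_half_page(struct rx_buff *rxb)
    {
            if (rxb->refcount != 1)   /* a donated half is still in flight */
                    return false;

            rxb->page_offset ^= HALF_PAGE;
            rxb->refcount++;          /* ring's reference to the new half */
            return true;
    }
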
+static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
+                              struct gfar_rx_buff *old_rxb)
+{
+       struct gfar_rx_buff *new_rxb;
+       u16 nta = rxq->next_to_alloc;
+
+       new_rxb = &rxq->rx_buff[nta];
+
+       /* find next buf that can reuse a page */
+       nta++;
+       rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
+
+       /* copy page reference */
+       *new_rxb = *old_rxb;
+
+       /* sync for use by the device */
+       dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
+                                        old_rxb->page_offset,
+                                        GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+}
+
+static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
+                                           u32 lstatus, struct sk_buff *skb)
+{
+       struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
+       struct page *page = rxb->page;
+       bool first = false;
+
+       if (likely(!skb)) {
+               void *buff_addr = page_address(page) + rxb->page_offset;
+
+               skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
+               if (unlikely(!skb)) {
+                       gfar_rx_alloc_err(rx_queue);
+                       return NULL;
+               }
+               skb_reserve(skb, RXBUF_ALIGNMENT);
+               first = true;
+       }
+
+       dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
+                                     GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
+
+       if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
+               /* reuse the free half of the page */
+               gfar_reuse_rx_page(rx_queue, rxb);
+       } else {
+               /* page cannot be reused, unmap it */
+               dma_unmap_page(rx_queue->dev, rxb->dma,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+       }
+
+       /* clear rxb content */
+       rxb->page = NULL;
+
+       return skb;
+}
+
 static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 {
        /* If valid headers were found, and valid sums
@@ -2797,10 +2877,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }
 
 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                              int amount_pull, struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-       struct gfar_private *priv = netdev_priv(dev);
+       struct gfar_private *priv = netdev_priv(ndev);
        struct rxfcb *fcb = NULL;
 
        /* fcb is at the beginning if exists */
@@ -2809,10 +2888,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        /* Remove the FCB from the skb
         * Remove the padded bytes, if there are any
         */
-       if (amount_pull) {
-               skb_record_rx_queue(skb, fcb->rq);
-               skb_pull(skb, amount_pull);
-       }
+       if (priv->uses_rxfcb)
+               skb_pull(skb, GMAC_FCB_LEN);
 
        /* Get receive timestamp from the skb */
        if (priv->hwts_rx_en) {
@@ -2826,24 +2903,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        if (priv->padding)
                skb_pull(skb, priv->padding);
 
-       if (dev->features & NETIF_F_RXCSUM)
+       if (ndev->features & NETIF_F_RXCSUM)
                gfar_rx_checksum(skb, fcb);
 
        /* Tell the skb what kind of packet this is */
-       skb->protocol = eth_type_trans(skb, dev);
+       skb->protocol = eth_type_trans(skb, ndev);
 
        /* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
         * Even if vlan rx accel is disabled, on some chips
         * RXFCB_VLN is pseudo randomly set.
         */
-       if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+       if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
            be16_to_cpu(fcb->flags) & RXFCB_VLN)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(fcb->vlctl));
-
-       /* Send the packet up the stack */
-       napi_gro_receive(napi, skb);
-
 }
 
 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2852,91 +2925,89 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-       struct net_device *dev = rx_queue->dev;
-       struct rxbd8 *bdp, *base;
-       struct sk_buff *skb;
-       int pkt_len;
-       int amount_pull;
-       int howmany = 0;
-       struct gfar_private *priv = netdev_priv(dev);
+       struct net_device *ndev = rx_queue->ndev;
+       struct gfar_private *priv = netdev_priv(ndev);
+       struct rxbd8 *bdp;
+       int i, howmany = 0;
+       struct sk_buff *skb = rx_queue->skb;
+       int cleaned_cnt = gfar_rxbd_unused(rx_queue);
+       unsigned int total_bytes = 0, total_pkts = 0;
 
        /* Get the first full descriptor */
-       bdp = rx_queue->cur_rx;
-       base = rx_queue->rx_bd_base;
+       i = rx_queue->next_to_clean;
 
-       amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+       while (rx_work_limit--) {
+               u32 lstatus;
+
+               if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+                       gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+                       cleaned_cnt = 0;
+               }
 
-       while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-               struct sk_buff *newskb;
-               dma_addr_t bufaddr;
+               bdp = &rx_queue->rx_bd_base[i];
+               lstatus = be32_to_cpu(bdp->lstatus);
+               if (lstatus & BD_LFLAG(RXBD_EMPTY))
+                       break;
 
+               /* order rx buffer descriptor reads */
                rmb();
 
-               /* Add another skb for the future */
-               newskb = gfar_new_skb(dev, &bufaddr);
+               /* fetch next to clean buffer from the ring */
+               skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
+               if (unlikely(!skb))
+                       break;
 
-               skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+               cleaned_cnt++;
+               howmany++;
 
-               dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
-                                priv->rx_buffer_size, DMA_FROM_DEVICE);
-
-               if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_ERR) &&
-                            be16_to_cpu(bdp->length) > priv->rx_buffer_size))
-                       bdp->status = cpu_to_be16(RXBD_LARGE);
-
-               /* We drop the frame if we failed to allocate a new buffer */
-               if (unlikely(!newskb ||
-                            !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
-                            be16_to_cpu(bdp->status) & RXBD_ERR)) {
-                       count_errors(be16_to_cpu(bdp->status), dev);
-
-                       if (unlikely(!newskb)) {
-                               newskb = skb;
-                               bufaddr = be32_to_cpu(bdp->bufPtr);
-                       } else if (skb)
-                               dev_kfree_skb(skb);
-               } else {
-                       /* Increment the number of packets */
-                       rx_queue->stats.rx_packets++;
-                       howmany++;
-
-                       if (likely(skb)) {
-                               pkt_len = be16_to_cpu(bdp->length) -
-                                         ETH_FCS_LEN;
-                               /* Remove the FCS from the packet length */
-                               skb_put(skb, pkt_len);
-                               rx_queue->stats.rx_bytes += pkt_len;
-                               skb_record_rx_queue(skb, rx_queue->qindex);
-                               gfar_process_frame(dev, skb, amount_pull,
-                                                  &rx_queue->grp->napi_rx);
+               if (unlikely(++i == rx_queue->rx_ring_size))
+                       i = 0;
 
-                       } else {
-                               netif_warn(priv, rx_err, dev, "Missing skb!\n");
-                               rx_queue->stats.rx_dropped++;
-                               atomic64_inc(&priv->extra_stats.rx_skbmissing);
-                       }
+               rx_queue->next_to_clean = i;
+
+               /* fetch next buffer if not the last in frame */
+               if (!(lstatus & BD_LFLAG(RXBD_LAST)))
+                       continue;
+
+               if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
+                       count_errors(lstatus, ndev);
 
+                       /* discard faulty buffer */
+                       dev_kfree_skb(skb);
+                       skb = NULL;
+                       rx_queue->stats.rx_dropped++;
+                       continue;
                }
 
-               rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
+               /* Increment the number of packets */
+               total_pkts++;
+               total_bytes += skb->len;
 
-               /* Setup the new bdp */
-               gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+               skb_record_rx_queue(skb, rx_queue->qindex);
 
-               /* Update Last Free RxBD pointer for LFC */
-               if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-                       gfar_write(rx_queue->rfbptr, (u32)bdp);
+               gfar_process_frame(ndev, skb);
 
-               /* Update to the next pointer */
-               bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+               /* Send the packet up the stack */
+               napi_gro_receive(&rx_queue->grp->napi_rx, skb);
 
-               /* update to point at the next skb */
-               rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-                                     RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+               skb = NULL;
        }
 
-       /* Update the current rxbd pointer to be the next one */
-       rx_queue->cur_rx = bdp;
+       /* Store incomplete frames for completion */
+       rx_queue->skb = skb;
+
+       rx_queue->stats.rx_packets += total_pkts;
+       rx_queue->stats.rx_bytes += total_bytes;
+
+       if (cleaned_cnt)
+               gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+
+       /* Update Last Free RxBD pointer for LFC */
+       if (unlikely(priv->tx_actual_en)) {
+               u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+
+               gfar_write(rx_queue->rfbptr, bdp_dma);
+       }
 
        return howmany;
 }
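
The rewritten receive path above splits every page into two GFAR_RXB_TRUESIZE halves: one half is handed to the stack while the other is flipped in for the next descriptor, and a page is recycled only while the driver holds the sole reference. A minimal sketch of that decision (hypothetical helper name; get_page() stands in for the atomic_inc() on page->_count used in the patch):

/* Sketch only: the half-page recycling test performed by gfar_add_rx_frag(). */
static bool gfar_rx_page_recyclable(struct gfar_rx_buff *rxb)
{
	if (page_count(rxb->page) != 1)
		return false;			/* the stack still references the page */

	rxb->page_offset ^= GFAR_RXB_TRUESIZE;	/* switch to the other 2K half */
	get_page(rxb->page);			/* keep the driver's own reference */
	return true;
}
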
@@ -3454,7 +3525,6 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
        struct phy_device *phydev = priv->phydev;
        struct gfar_priv_rx_q *rx_queue = NULL;
        int i;
-       struct rxbd8 *bdp;
 
        if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
                return;
@@ -3511,15 +3581,11 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
                /* Turn last free buffer recording on */
                if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
                        for (i = 0; i < priv->num_rx_queues; i++) {
+                               u32 bdp_dma;
+
                                rx_queue = priv->rx_queue[i];
-                               bdp = rx_queue->cur_rx;
-                               /* skip to previous bd */
-                               bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-                                             rx_queue->rx_bd_base,
-                                             rx_queue->rx_ring_size);
-
-                               if (rx_queue->rfbptr)
-                                       gfar_write(rx_queue->rfbptr, (u32)bdp);
+                               bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
+                               gfar_write(rx_queue->rfbptr, bdp_dma);
                        }
 
                        priv->tx_actual_en = 1;
index 5545e41033686e3d7443811e2ec1caa2da2f5be4..8c1994856e93823174d29c08ed8ae691c1530120 100644 (file)
@@ -71,11 +71,6 @@ struct ethtool_rx_list {
 /* Number of bytes to align the rx bufs to */
 #define RXBUF_ALIGNMENT 64
 
-/* The number of bytes which composes a unit for the purpose of
- * allocating data buffers.  ie-for any given MTU, the data buffer
- * will be the next highest multiple of 512 bytes. */
-#define INCREMENTAL_BUFFER_SIZE 512
-
 #define PHY_INIT_TIMEOUT 100000
 
 #define DRV_NAME "gfar-enet"
@@ -92,6 +87,8 @@ extern const char gfar_driver_version[];
 #define DEFAULT_TX_RING_SIZE   256
 #define DEFAULT_RX_RING_SIZE   256
 
+#define GFAR_RX_BUFF_ALLOC     16
+
 #define GFAR_RX_MAX_RING_SIZE   256
 #define GFAR_TX_MAX_RING_SIZE   256
 
@@ -103,11 +100,14 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RX_LFC_THR  16
 #define DEFAULT_LFC_PTVVAL  4
 
-#define DEFAULT_RX_BUFFER_SIZE  1536
+#define GFAR_RXB_SIZE 1536
+#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+                         + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define GFAR_RXB_TRUESIZE 2048
+
 #define TX_RING_MOD_MASK(size) (size-1)
 #define RX_RING_MOD_MASK(size) (size-1)
-#define JUMBO_BUFFER_SIZE 9728
-#define JUMBO_FRAME_SIZE 9600
+#define GFAR_JUMBO_FRAME_SIZE 9600
 
 #define DEFAULT_FIFO_TX_THR 0x100
 #define DEFAULT_FIFO_TX_STARVE 0x40
@@ -640,6 +640,7 @@ struct rmon_mib
 };
 
 struct gfar_extra_stats {
+       atomic64_t rx_alloc_err;
        atomic64_t rx_large;
        atomic64_t rx_short;
        atomic64_t rx_nonoctet;
@@ -651,7 +652,6 @@ struct gfar_extra_stats {
        atomic64_t eberr;
        atomic64_t tx_babt;
        atomic64_t tx_underrun;
-       atomic64_t rx_skbmissing;
        atomic64_t tx_timeout;
 };
 
@@ -1012,34 +1012,42 @@ struct rx_q_stats {
        unsigned long rx_dropped;
 };
 
+struct gfar_rx_buff {
+       dma_addr_t dma;
+       struct page *page;
+       unsigned int page_offset;
+};
+
 /**
  *     struct gfar_priv_rx_q - per rx queue structure
- *     @rx_skbuff: skb pointers
- *     @skb_currx: currently use skb pointer
+ *     @rx_buff: Array of buffer info metadata structs
  *     @rx_bd_base: First rx buffer descriptor
- *     @cur_rx: Next free rx ring entry
+ *     @next_to_use: index of the next buffer to be alloc'd
+ *     @next_to_clean: index of the next buffer to be cleaned
  *     @qindex: index of this queue
- *     @dev: back pointer to the dev structure
+ *     @ndev: back pointer to net_device
  *     @rx_ring_size: Rx ring size
  *     @rxcoalescing: enable/disable rx-coalescing
 *     @rxic: receive interrupt coalescing value
  */
 
 struct gfar_priv_rx_q {
-       struct  sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
-       dma_addr_t rx_bd_dma_base;
+       struct  gfar_rx_buff *rx_buff __aligned(SMP_CACHE_BYTES);
        struct  rxbd8 *rx_bd_base;
-       struct  rxbd8 *cur_rx;
-       struct  net_device *dev;
-       struct gfar_priv_grp *grp;
+       struct  net_device *ndev;
+       struct  device *dev;
+       u16 rx_ring_size;
+       u16 qindex;
+       struct  gfar_priv_grp *grp;
+       u16 next_to_clean;
+       u16 next_to_use;
+       u16 next_to_alloc;
+       struct  sk_buff *skb;
        struct rx_q_stats stats;
-       u16     skb_currx;
-       u16     qindex;
-       unsigned int    rx_ring_size;
-       /* RX Coalescing values */
+       u32 __iomem *rfbptr;
        unsigned char rxcoalescing;
        unsigned long rxic;
-       u32 __iomem *rfbptr;
+       dma_addr_t rx_bd_dma_base;
 };
 
 enum gfar_irqinfo_id {
@@ -1109,7 +1117,6 @@ struct gfar_private {
        struct device *dev;
        struct net_device *ndev;
        enum gfar_errata errata;
-       unsigned int rx_buffer_size;
 
        u16 uses_rxfcb;
        u16 padding;
@@ -1292,6 +1299,28 @@ static inline void gfar_clear_txbd_status(struct txbd8 *bdp)
        bdp->lstatus = cpu_to_be32(lstatus);
 }
 
+static inline int gfar_rxbd_unused(struct gfar_priv_rx_q *rxq)
+{
+       if (rxq->next_to_clean > rxq->next_to_use)
+               return rxq->next_to_clean - rxq->next_to_use - 1;
+
+       return rxq->rx_ring_size + rxq->next_to_clean - rxq->next_to_use - 1;
+}
+
+static inline u32 gfar_rxbd_dma_lastfree(struct gfar_priv_rx_q *rxq)
+{
+       struct rxbd8 *bdp;
+       u32 bdp_dma;
+       int i;
+
+       i = rxq->next_to_use ? rxq->next_to_use - 1 : rxq->rx_ring_size - 1;
+       bdp = &rxq->rx_bd_base[i];
+       bdp_dma = lower_32_bits(rxq->rx_bd_dma_base);
+       bdp_dma += (uintptr_t)bdp - (uintptr_t)rxq->rx_bd_base;
+
+       return bdp_dma;
+}
+
 irqreturn_t gfar_receive(int irq, void *dev_id);
 int startup_gfar(struct net_device *dev);
 void stop_gfar(struct net_device *dev);
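
The index bookkeeping above always leaves one descriptor slot unused so that a full ring and an empty ring stay distinguishable, which is why gfar_rxbd_unused() subtracts one. A worked example with the default 256-entry ring (illustrative values only):

/* next_to_clean = 200, next_to_use = 10   ->  200 - 10 - 1       = 189 refillable
 * next_to_clean = 10,  next_to_use = 200  ->  256 + 10 - 200 - 1 =  65 refillable
 * next_to_clean == next_to_use (empty)    ->  256 + 0 - 0 - 1    = 255 refillable
 */
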
index 3c0a8f825b630148c29b4cda6ca46b9797a668be..555e461b0cfe272e9944f594e2a584faef242f03 100644 (file)
@@ -61,6 +61,8 @@ static void gfar_gdrvinfo(struct net_device *dev,
                          struct ethtool_drvinfo *drvinfo);
 
 static const char stat_gstrings[][ETH_GSTRING_LEN] = {
+       /* extra stats */
+       "rx-allocation-errors",
        "rx-large-frame-errors",
        "rx-short-frame-errors",
        "rx-non-octet-errors",
@@ -72,8 +74,8 @@ static const char stat_gstrings[][ETH_GSTRING_LEN] = {
        "ethernet-bus-error",
        "tx-babbling-errors",
        "tx-underrun-errors",
-       "rx-skb-missing-errors",
        "tx-timeout-errors",
+       /* rmon stats */
        "tx-rx-64-frames",
        "tx-rx-65-127-frames",
        "tx-rx-128-255-frames",
index d49bee38cd319a0a8c7afd2cad7f1cb1ac7f2ed3..cc2d8b4b18e3e2a99ef303b76809545496089787 100644 (file)
@@ -965,7 +965,6 @@ static struct platform_driver hip04_mac_driver = {
        .remove = hip04_remove,
        .driver = {
                .name           = DRV_NAME,
-               .owner          = THIS_MODULE,
                .of_match_table = hip04_mac_match,
        },
 };
index b3bac25db99cf59ed1cdd2e990962cf129297846..fca0a5be1f0f732cd340dd056e6be86b2fb0a925 100644 (file)
@@ -174,7 +174,6 @@ static struct platform_driver hip04_mdio_driver = {
        .remove = hip04_mdio_remove,
        .driver = {
                .name = "hip04-mdio",
-               .owner = THIS_MODULE,
                .of_match_table = hip04_mdio_match,
        },
 };
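
Dropping the explicit .owner assignment from the two hip04 platform drivers is safe because the platform core records the owning module at registration time. For reference, this is my reading of the driver core rather than part of this patch: the registration helper is a macro that passes THIS_MODULE itself.

/* include/linux/platform_device.h, paraphrased: */
#define platform_driver_register(drv) \
	__platform_driver_register(drv, THIS_MODULE)
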
index 29bbb628d712b38e8b17529626b85c3532d7942f..7af870a3c549592803a55a5cfea89ea1b15044d3 100644 (file)
@@ -79,6 +79,11 @@ static unsigned int rx_flush __read_mostly = 0;
 module_param(rx_flush, uint, 0644);
 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
 
+static bool old_large_send __read_mostly;
+module_param(old_large_send, bool, S_IRUGO);
+MODULE_PARM_DESC(old_large_send,
+       "Use old large send method on firmware that supports the new method");
+
 struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
@@ -101,7 +106,8 @@ struct ibmveth_stat ibmveth_stats[] = {
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
        { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
-       { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) }
+       { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
+       { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
 };
 
 /* simple methods of getting data from the current rxq entry */
@@ -848,25 +854,91 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
        return rc1 ? rc1 : rc2;
 }
 
+static int ibmveth_set_tso(struct net_device *dev, u32 data)
+{
+       struct ibmveth_adapter *adapter = netdev_priv(dev);
+       unsigned long set_attr, clr_attr, ret_attr;
+       long ret1, ret2;
+       int rc1 = 0, rc2 = 0;
+       int restart = 0;
+
+       if (netif_running(dev)) {
+               restart = 1;
+               adapter->pool_config = 1;
+               ibmveth_close(dev);
+               adapter->pool_config = 0;
+       }
+
+       set_attr = 0;
+       clr_attr = 0;
+
+       if (data)
+               set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+       else
+               clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
+
+       ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+       if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+           !old_large_send) {
+               ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
+                                         set_attr, &ret_attr);
+
+               if (ret2 != H_SUCCESS) {
+                       netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
+                                  data, ret2);
+
+                       h_illan_attributes(adapter->vdev->unit_address,
+                                          set_attr, clr_attr, &ret_attr);
+
+                       if (data == 1)
+                               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+                       rc1 = -EIO;
+
+               } else {
+                       adapter->fw_large_send_support = data;
+                       adapter->large_send = data;
+               }
+       } else {
+               /* Older firmware version of large send offload does not
+                * support tcp6/ipv6
+                */
+               if (data == 1) {
+                       dev->features &= ~NETIF_F_TSO6;
+                       netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+               }
+               adapter->large_send = data;
+       }
+
+       if (restart)
+               rc2 = ibmveth_open(dev);
+
+       return rc1 ? rc1 : rc2;
+}
+
 static int ibmveth_set_features(struct net_device *dev,
        netdev_features_t features)
 {
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
-       int rc;
-       netdev_features_t changed = features ^ dev->features;
-
-       if (features & NETIF_F_TSO & changed)
-               netdev_info(dev, "TSO feature requires all partitions to have updated driver");
+       int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
+       int rc1 = 0, rc2 = 0;
 
-       if (rx_csum == adapter->rx_csum)
-               return 0;
+       if (rx_csum != adapter->rx_csum) {
+               rc1 = ibmveth_set_csum_offload(dev, rx_csum);
+               if (rc1 && !adapter->rx_csum)
+                       dev->features =
+                               features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       }
 
-       rc = ibmveth_set_csum_offload(dev, rx_csum);
-       if (rc && !adapter->rx_csum)
-               dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+       if (large_send != adapter->large_send) {
+               rc2 = ibmveth_set_tso(dev, large_send);
+               if (rc2 && !adapter->large_send)
+                       dev->features =
+                               features & ~(NETIF_F_TSO | NETIF_F_TSO6);
+       }
 
-       return rc;
+       return rc1 ? rc1 : rc2;
 }
 
 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -917,7 +989,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
 
 static int ibmveth_send(struct ibmveth_adapter *adapter,
-                       union ibmveth_buf_desc *descs)
+                       union ibmveth_buf_desc *descs, unsigned long mss)
 {
        unsigned long correlator;
        unsigned int retry_count;
@@ -934,7 +1006,8 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
                                             descs[0].desc, descs[1].desc,
                                             descs[2].desc, descs[3].desc,
                                             descs[4].desc, descs[5].desc,
-                                            correlator, &correlator);
+                                            correlator, &correlator, mss,
+                                            adapter->fw_large_send_support);
        } while ((ret == H_BUSY) && (retry_count--));
 
        if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -955,6 +1028,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
        int last, i;
        int force_bounce = 0;
        dma_addr_t dma_addr;
+       unsigned long mss = 0;
 
        /*
         * veth handles a maximum of 6 segments including the header, so
@@ -980,6 +1054,9 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
 
        desc_flags = IBMVETH_BUF_VALID;
 
+       if (skb_is_gso(skb) && adapter->fw_large_send_support)
+               desc_flags |= IBMVETH_BUF_LRG_SND;
+
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) +
                                                skb->csum_offset;
@@ -1007,7 +1084,7 @@ retry_bounce:
                descs[0].fields.flags_len = desc_flags | skb->len;
                descs[0].fields.address = adapter->bounce_buffer_dma;
 
-               if (ibmveth_send(adapter, descs)) {
+               if (ibmveth_send(adapter, descs, 0)) {
                        adapter->tx_send_failed++;
                        netdev->stats.tx_dropped++;
                } else {
@@ -1041,16 +1118,23 @@ retry_bounce:
                descs[i+1].fields.address = dma_addr;
        }
 
-       if (skb_is_gso(skb) && !skb_is_gso_v6(skb)) {
-               /* Put -1 in the IP checksum to tell phyp it
-                *  is a largesend packet and put the mss in the TCP checksum.
-                */
-               ip_hdr(skb)->check = 0xffff;
-               tcp_hdr(skb)->check = cpu_to_be16(skb_shinfo(skb)->gso_size);
-               adapter->tx_large_packets++;
+       if (skb_is_gso(skb)) {
+               if (adapter->fw_large_send_support) {
+                       mss = (unsigned long)skb_shinfo(skb)->gso_size;
+                       adapter->tx_large_packets++;
+               } else if (!skb_is_gso_v6(skb)) {
+                       /* Put -1 in the IP checksum to tell phyp it
+                        * is a largesend packet. Put the mss in
+                        * the TCP checksum.
+                        */
+                       ip_hdr(skb)->check = 0xffff;
+                       tcp_hdr(skb)->check =
+                               cpu_to_be16(skb_shinfo(skb)->gso_size);
+                       adapter->tx_large_packets++;
+               }
        }
 
-       if (ibmveth_send(adapter, descs)) {
+       if (ibmveth_send(adapter, descs, mss)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
        } else {
@@ -1401,6 +1485,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        struct ibmveth_adapter *adapter;
        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;
+       long ret;
+       unsigned long ret_attr;
 
        dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
                dev->unit_address);
@@ -1449,10 +1535,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        SET_NETDEV_DEV(netdev, &dev->dev);
        netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
        netdev->features |= netdev->hw_features;
 
-       /* TSO is disabled by default */
-       netdev->hw_features |= NETIF_F_TSO;
+       ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
+
+       /* If running older firmware, TSO should not be enabled by default */
+       if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
+           !old_large_send) {
+               netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+               netdev->features |= netdev->hw_features;
+       } else {
+               netdev->hw_features |= NETIF_F_TSO;
+       }
 
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
index 41dedb1fb2ae7403d89f3feed38b4f87197de067..4eade67fe30c32a0c528631b27ab9042bd1d4caf 100644 (file)
@@ -40,6 +40,8 @@
 #define IbmVethMcastRemoveFilter     0x2UL
 #define IbmVethMcastClearFilterTable 0x3UL
 
+#define IBMVETH_ILLAN_LRG_SR_ENABLED   0x0000000000010000UL
+#define IBMVETH_ILLAN_LRG_SND_SUPPORT  0x0000000000008000UL
 #define IBMVETH_ILLAN_PADDED_PKT_CSUM  0x0000000000002000UL
 #define IBMVETH_ILLAN_TRUNK_PRI_MASK   0x0000000000000F00UL
 #define IBMVETH_ILLAN_IPV6_TCP_CSUM            0x0000000000000004UL
 static inline long h_send_logical_lan(unsigned long unit_address,
                unsigned long desc1, unsigned long desc2, unsigned long desc3,
                unsigned long desc4, unsigned long desc5, unsigned long desc6,
-               unsigned long corellator_in, unsigned long *corellator_out)
+               unsigned long corellator_in, unsigned long *corellator_out,
+               unsigned long mss, unsigned long large_send_support)
 {
        long rc;
        unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
 
-       rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address, desc1,
-                       desc2, desc3, desc4, desc5, desc6, corellator_in);
+       if (large_send_support)
+               rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+                                 desc1, desc2, desc3, desc4, desc5, desc6,
+                                 corellator_in, mss);
+       else
+               rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
+                                 desc1, desc2, desc3, desc4, desc5, desc6,
+                                 corellator_in);
 
        *corellator_out = retbuf[0];
 
@@ -147,11 +156,13 @@ struct ibmveth_adapter {
     struct ibmveth_rx_q rx_queue;
     int pool_config;
     int rx_csum;
+    int large_send;
     void *bounce_buffer;
     dma_addr_t bounce_buffer_dma;
 
     u64 fw_ipv6_csum_support;
     u64 fw_ipv4_csum_support;
+    u64 fw_large_send_support;
     /* adapter specific stats */
     u64 replenish_task_cycles;
     u64 replenish_no_mem;
@@ -182,6 +193,7 @@ struct ibmveth_buf_desc_fields {
 #endif
 #define IBMVETH_BUF_VALID      0x80000000
 #define IBMVETH_BUF_TOGGLE     0x40000000
+#define IBMVETH_BUF_LRG_SND     0x04000000
 #define IBMVETH_BUF_NO_CSUM    0x02000000
 #define IBMVETH_BUF_CSUM_GOOD  0x01000000
 #define IBMVETH_BUF_LEN_MASK   0x00FFFFFF
index 89d788d8f263e5c362c10166dc76fa59f517e12c..fea1601f32a3614122a75e03fd98e786cab2ac2a 100644 (file)
@@ -4588,6 +4588,7 @@ static int e1000_open(struct net_device *netdev)
        return 0;
 
 err_req_irq:
+       pm_qos_remove_request(&adapter->pm_qos_req);
        e1000e_release_hw_control(adapter);
        e1000_power_down_phy(adapter);
        e1000e_free_rx_resources(adapter->rx_ring);
index ec76c3fa3a041158dcb5c21872afd5dd8352b9aa..281fd8456146190427a0390cedd3cfb2806d31af 100644 (file)
@@ -98,7 +98,7 @@
 #define I40E_INT_NAME_STR_LEN        (IFNAMSIZ + 9)
 
 /* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG      (1 << 0)
+#define I40E_PRIV_FLAGS_NPAR_FLAG      BIT(0)
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -289,35 +289,35 @@ struct i40e_pf {
        struct work_struct service_task;
 
        u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED              (u64)(1 << 1)
-#define I40E_FLAG_MSI_ENABLED                  (u64)(1 << 2)
-#define I40E_FLAG_MSIX_ENABLED                 (u64)(1 << 3)
-#define I40E_FLAG_RX_1BUF_ENABLED              (u64)(1 << 4)
-#define I40E_FLAG_RX_PS_ENABLED                (u64)(1 << 5)
-#define I40E_FLAG_RSS_ENABLED                  (u64)(1 << 6)
-#define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
-#define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
-#define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#define I40E_FLAG_RX_CSUM_ENABLED              BIT_ULL(1)
+#define I40E_FLAG_MSI_ENABLED                  BIT_ULL(2)
+#define I40E_FLAG_MSIX_ENABLED                 BIT_ULL(3)
+#define I40E_FLAG_RX_1BUF_ENABLED              BIT_ULL(4)
+#define I40E_FLAG_RX_PS_ENABLED                        BIT_ULL(5)
+#define I40E_FLAG_RSS_ENABLED                  BIT_ULL(6)
+#define I40E_FLAG_VMDQ_ENABLED                 BIT_ULL(7)
+#define I40E_FLAG_FDIR_REQUIRES_REINIT         BIT_ULL(8)
+#define I40E_FLAG_NEED_LINK_UPDATE             BIT_ULL(9)
 #ifdef I40E_FCOE
-#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#define I40E_FLAG_FCOE_ENABLED                 BIT_ULL(11)
 #endif /* I40E_FCOE */
-#define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
-#define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
-#define I40E_FLAG_FILTER_SYNC                  (u64)(1 << 15)
-#define I40E_FLAG_PROCESS_MDD_EVENT            (u64)(1 << 17)
-#define I40E_FLAG_PROCESS_VFLR_EVENT           (u64)(1 << 18)
-#define I40E_FLAG_SRIOV_ENABLED                (u64)(1 << 19)
-#define I40E_FLAG_DCB_ENABLED                  (u64)(1 << 20)
-#define I40E_FLAG_FD_SB_ENABLED                (u64)(1 << 21)
-#define I40E_FLAG_FD_ATR_ENABLED               (u64)(1 << 22)
-#define I40E_FLAG_PTP                          (u64)(1 << 25)
-#define I40E_FLAG_MFP_ENABLED                  (u64)(1 << 26)
+#define I40E_FLAG_IN_NETPOLL                   BIT_ULL(12)
+#define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
+#define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
+#define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
+#define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
+#define I40E_FLAG_SRIOV_ENABLED                        BIT_ULL(19)
+#define I40E_FLAG_DCB_ENABLED                  BIT_ULL(20)
+#define I40E_FLAG_FD_SB_ENABLED                        BIT_ULL(21)
+#define I40E_FLAG_FD_ATR_ENABLED               BIT_ULL(22)
+#define I40E_FLAG_PTP                          BIT_ULL(25)
+#define I40E_FLAG_MFP_ENABLED                  BIT_ULL(26)
 #ifdef CONFIG_I40E_VXLAN
-#define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
+#define I40E_FLAG_VXLAN_FILTER_SYNC            BIT_ULL(27)
 #endif
-#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
-#define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_PORT_ID_VALID                        BIT_ULL(28)
+#define I40E_FLAG_DCB_CAPABLE                  BIT_ULL(29)
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 
        /* tracks features that get auto disabled by errors */
@@ -443,8 +443,8 @@ struct i40e_vsi {
 
        u32 current_netdev_flags;
        unsigned long state;
-#define I40E_VSI_FLAG_FILTER_CHANGED  (1<<0)
-#define I40E_VSI_FLAG_VEB_OWNER       (1<<1)
+#define I40E_VSI_FLAG_FILTER_CHANGED   BIT(0)
+#define I40E_VSI_FLAG_VEB_OWNER                BIT(1)
        unsigned long flags;
 
        struct list_head mac_filter_list;
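
The i40e changes above replace open-coded (1 << n) shifts with the BIT() and BIT_ULL() helpers, which keep the 64-bit flag words well defined for bit positions at or above 32. To my understanding of linux/bitops.h, the helpers are simple wrappers:

/* Sketch only: the generic bit helpers the flag definitions are converted to. */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))
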
index 929e3d72a01e5aa6901787fa708f046198af0a22..9101f5c00f37104dd993049e6a34e9103d5a627a 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0002
+#define I40E_FW_API_VERSION_MINOR      0x0004
 
 struct i40e_aq_desc {
        __le16 flags;
@@ -132,12 +132,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
-       i40e_aqc_opc_set_cppm_configuration     = 0x0103,
-       i40e_aqc_opc_set_arp_proxy_entry        = 0x0104,
-       i40e_aqc_opc_set_ns_proxy_entry         = 0x0105,
-
        /* LAA */
-       i40e_aqc_opc_mng_laa            = 0x0106,   /* AQ obsolete */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
 
@@ -262,7 +257,6 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
-       i40e_aqc_opc_tunnel_key_structure       = 0x0B10,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -274,8 +268,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
-       i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
-       i40e_aqc_opc_debug_set_mode             = 0xFF01,
        i40e_aqc_opc_debug_read_reg             = 0xFF03,
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
@@ -509,7 +501,8 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
 #define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_ADDR_VALID_MASK       0xf0
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -532,7 +525,9 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
 #define I40E_AQC_WRITE_TYPE_PORT       0x8000
-#define I40E_AQC_WRITE_TYPE_MASK       0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
        __le16  mac_sah;
        __le32  mac_sal;
        u8      reserved[8];
@@ -1068,6 +1063,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
@@ -2064,6 +2060,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
 #define I40E_AQC_CEE_PFC_STATUS_MASK   (0x7 << I40E_AQC_CEE_PFC_STATUS_SHIFT)
 #define I40E_AQC_CEE_APP_STATUS_SHIFT  0x8
 #define I40E_AQC_CEE_APP_STATUS_MASK   (0x7 << I40E_AQC_CEE_APP_STATUS_SHIFT)
+#define I40E_AQC_CEE_FCOE_STATUS_SHIFT 0x8
+#define I40E_AQC_CEE_FCOE_STATUS_MASK  (0x7 << I40E_AQC_CEE_FCOE_STATUS_SHIFT)
+#define I40E_AQC_CEE_ISCSI_STATUS_SHIFT        0xA
+#define I40E_AQC_CEE_ISCSI_STATUS_MASK (0x7 << I40E_AQC_CEE_ISCSI_STATUS_SHIFT)
+#define I40E_AQC_CEE_FIP_STATUS_SHIFT  0x10
+#define I40E_AQC_CEE_FIP_STATUS_MASK   (0x7 << I40E_AQC_CEE_FIP_STATUS_SHIFT)
 struct i40e_aqc_get_cee_dcb_cfg_v1_resp {
        u8      reserved1;
        u8      oper_num_tc;
index 0bae22da014db05d9cd7a56d5a9d21902e49ca41..167ca0d752ea8065c04a731395029d40eb90a888 100644 (file)
@@ -71,6 +71,212 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
        return status;
 }
 
+/**
+ * i40e_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40e_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case 0:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
+
 /**
  * i40e_debug_aq
  * @hw: debug mask related to admin queue
@@ -1187,9 +1393,9 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
                        blink = false;
 
                if (blink)
-                       gpio_val |= (1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
                else
-                       gpio_val &= ~(1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
+                       gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
 
                wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
                break;
@@ -2391,7 +2597,7 @@ i40e_aq_erase_nvm_exit:
 #define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
 #define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
 #define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_MFP_MODE_1   0xF1
+#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
 #define I40E_DEV_FUNC_CAP_CEM          0xF2
 #define I40E_DEV_FUNC_CAP_IWARP                0x51
 #define I40E_DEV_FUNC_CAP_LED          0x61
@@ -2416,6 +2622,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        u32 valid_functions, num_functions;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
+       u8 major_rev;
        u32 i = 0;
        u16 id;
 
@@ -2433,6 +2640,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                number = le32_to_cpu(cap->number);
                logical_id = le32_to_cpu(cap->logical_id);
                phys_id = le32_to_cpu(cap->phys_id);
+               major_rev = cap->major_rev;
 
                switch (id) {
                case I40E_DEV_FUNC_CAP_SWITCH_MODE:
@@ -2507,9 +2715,21 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                case I40E_DEV_FUNC_CAP_MSIX_VF:
                        p->num_msix_vectors_vf = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MFP_MODE_1:
-                       if (number == 1)
-                               p->mfp_mode_1 = true;
+               case I40E_DEV_FUNC_CAP_FLEX10:
+                       if (major_rev == 1) {
+                               if (number == 1) {
+                                       p->flex10_enable = true;
+                                       p->flex10_capable = true;
+                               }
+                       } else {
+                               /* Capability revision >= 2 */
+                               if (number & 1)
+                                       p->flex10_enable = true;
+                               if (number & 2)
+                                       p->flex10_capable = true;
+                       }
+                       p->flex10_mode = logical_id;
+                       p->flex10_status = phys_id;
                        break;
                case I40E_DEV_FUNC_CAP_CEM:
                        if (number == 1)
@@ -2557,7 +2777,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        /* Software override ensuring FCoE is disabled if npar or mfp
         * mode because it is not supported in these modes.
         */
-       if (p->npar_enable || p->mfp_mode_1)
+       if (p->npar_enable || p->flex10_enable)
                p->fcoe = false;
 
        /* count the enabled ports (aka the "not disabled" ports) */
index e137e3fac8ee2fd280ffaac6ac5924365e3abbdc..50fc894a4cde3b78fa0b9bbc5edf8564532bbd29 100644 (file)
@@ -58,9 +58,9 @@
 #define I40E_IEEE_ETS_MAXTC_SHIFT      0
 #define I40E_IEEE_ETS_MAXTC_MASK       (0x7 << I40E_IEEE_ETS_MAXTC_SHIFT)
 #define I40E_IEEE_ETS_CBS_SHIFT                6
-#define I40E_IEEE_ETS_CBS_MASK         (0x1 << I40E_IEEE_ETS_CBS_SHIFT)
+#define I40E_IEEE_ETS_CBS_MASK         BIT(I40E_IEEE_ETS_CBS_SHIFT)
 #define I40E_IEEE_ETS_WILLING_SHIFT    7
-#define I40E_IEEE_ETS_WILLING_MASK     (0x1 << I40E_IEEE_ETS_WILLING_SHIFT)
+#define I40E_IEEE_ETS_WILLING_MASK     BIT(I40E_IEEE_ETS_WILLING_SHIFT)
 #define I40E_IEEE_ETS_PRIO_0_SHIFT     0
 #define I40E_IEEE_ETS_PRIO_0_MASK      (0x7 << I40E_IEEE_ETS_PRIO_0_SHIFT)
 #define I40E_IEEE_ETS_PRIO_1_SHIFT     4
@@ -79,9 +79,9 @@
 #define I40E_IEEE_PFC_CAP_SHIFT                0
 #define I40E_IEEE_PFC_CAP_MASK         (0xF << I40E_IEEE_PFC_CAP_SHIFT)
 #define I40E_IEEE_PFC_MBC_SHIFT                6
-#define I40E_IEEE_PFC_MBC_MASK         (0x1 << I40E_IEEE_PFC_MBC_SHIFT)
+#define I40E_IEEE_PFC_MBC_MASK         BIT(I40E_IEEE_PFC_MBC_SHIFT)
 #define I40E_IEEE_PFC_WILLING_SHIFT    7
-#define I40E_IEEE_PFC_WILLING_MASK     (0x1 << I40E_IEEE_PFC_WILLING_SHIFT)
+#define I40E_IEEE_PFC_WILLING_MASK     BIT(I40E_IEEE_PFC_WILLING_SHIFT)
 
 /* Defines for IEEE APP TLV */
 #define I40E_IEEE_APP_SEL_SHIFT                0
index bd5079d5c1b682016db7a166c11c9a0e9f392b38..1c51f736a8d0ab54bba24fa8160fddd1f1d93174 100644 (file)
@@ -187,7 +187,7 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
        /* Set up all the App TLVs if DCBx is negotiated */
        for (i = 0; i < dcbxcfg->numapps; i++) {
                prio = dcbxcfg->app[i].priority;
-               tc_map = (1 << dcbxcfg->etscfg.prioritytable[prio]);
+               tc_map = BIT(dcbxcfg->etscfg.prioritytable[prio]);
 
                /* Add APP only if the TC is enabled for this VSI */
                if (tc_map & vsi->tc_config.enabled_tc) {
index da0faf478af076199e4281b0f3da57ad92c5e62b..d7c15d17faa634c1cb901fc360619e47436dfb39 100644 (file)
@@ -964,7 +964,7 @@ static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
                pf->auto_disable_flags |= flag;
        }
        dev_info(&pf->pdev->dev, "requesting a PF reset\n");
-       i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+       i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 }
 
 #define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
@@ -1471,19 +1471,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                }
        } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "corer", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_CORE_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "globr", 5) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_GLOBAL_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "empr", 4) == 0) {
                dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
-               i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
+               i40e_do_reset_safe(pf, BIT(__I40E_EMP_RESET_REQUESTED));
 
        } else if (strncmp(cmd_buf, "read", 4) == 0) {
                u32 address;
index 56438bd579e61a24d2f2c2cefefc89eecf2a926a..f141e78d409e5b1a7eeb8e7a304ec9864d2d0386 100644 (file)
@@ -144,11 +144,8 @@ i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
        ret_code = i40e_read_nvm_word(hw, I40E_SR_NVM_CONTROL_WORD, &reg_val);
        if (!ret_code &&
            ((reg_val & I40E_SR_CONTROL_WORD_1_MASK) ==
-            (0x01 << I40E_SR_CONTROL_WORD_1_SHIFT))) {
-               ret_code = i40e_validate_nvm_checksum(hw, NULL);
-       } else {
-               ret_code = I40E_ERR_DIAG_TEST_FAILED;
-       }
-
-       return ret_code;
+            BIT(I40E_SR_CONTROL_WORD_1_SHIFT)))
+               return i40e_validate_nvm_checksum(hw, NULL);
+       else
+               return I40E_ERR_DIAG_TEST_FAILED;
 }
index 9a68c65b17ea03bd00642aab5fe3b2e5a5066765..83d41c2cb02d43fceba54295f05151c12b912b5b 100644 (file)
@@ -148,7 +148,9 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
        I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
        I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
+       I40E_PF_STAT("fdir_atr_status", stats.fd_atr_status),
        I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+       I40E_PF_STAT("fdir_sb_status", stats.fd_sb_status),
 
        /* LPI stats */
        I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
@@ -679,15 +681,17 @@ static int i40e_set_settings(struct net_device *netdev,
                /* make the aq call */
                status = i40e_aq_set_phy_config(hw, &config, NULL);
                if (status) {
-                       netdev_info(netdev, "Set phy config failed with error %d.\n",
-                                   status);
+                       netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
                        return -EAGAIN;
                }
 
                status = i40e_aq_get_link_info(hw, true, NULL, NULL);
                if (status)
-                       netdev_info(netdev, "Updating link info failed with error %d\n",
-                                   status);
+                       netdev_info(netdev, "Updating link info failed with err %s aq_err %s\n",
+                                   i40e_stat_str(hw, status),
+                                   i40e_aq_str(hw, hw->aq.asq_last_status));
 
        } else {
                netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
@@ -707,8 +711,9 @@ static int i40e_nway_reset(struct net_device *netdev)
 
        ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
        if (ret) {
-               netdev_info(netdev, "link restart failed, aq_err=%d\n",
-                           pf->hw.aq.asq_last_status);
+               netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
+                           i40e_stat_str(hw, ret),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                return -EIO;
        }
 
@@ -820,18 +825,21 @@ static int i40e_set_pauseparam(struct net_device *netdev,
        status = i40e_set_fc(hw, &aq_failures, link_up);
 
        if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
-               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
-               netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
        if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
-               netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
-                           status, hw->aq.asq_last_status);
+               netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
+                           i40e_stat_str(hw, status),
+                           i40e_aq_str(hw, hw->aq.asq_last_status));
                err = -EAGAIN;
        }
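
The reworked messages in this file lean on two helpers, i40e_stat_str() and i40e_aq_str(), which translate the driver status code and the last admin-queue error into printable strings, so logs name the failure instead of printing raw integers. A sketch of the decode pattern they implement; the demo_* names and table here are stand-ins, not the driver's actual mapping:

    #include <stdio.h>

    enum demo_aq_rc { DEMO_AQ_RC_OK, DEMO_AQ_RC_EPERM, DEMO_AQ_RC_ENOENT };

    /* Map an error code to a stable, human-readable name. */
    static const char *demo_aq_str(enum demo_aq_rc err)
    {
            switch (err) {
            case DEMO_AQ_RC_OK:     return "OK";
            case DEMO_AQ_RC_EPERM:  return "I40E_AQ_RC_EPERM";
            case DEMO_AQ_RC_ENOENT: return "I40E_AQ_RC_ENOENT";
            }
            return "unknown";
    }

    int main(void)
    {
            printf("set fc failed, aq_err %s\n", demo_aq_str(DEMO_AQ_RC_EPERM));
            return 0;
    }
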
 
@@ -1009,7 +1017,7 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
                & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
                >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
        /* register returns value in power of 2, 64Kbyte chunks. */
-       val = (64 * 1024) * (1 << val);
+       val = (64 * 1024) * BIT(val);
        return val;
 }
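
The flash-size field read from I40E_GLPCI_LBARCTRL is an exponent, so the rewritten line computes 64KB scaled by a power of two. A worked sketch (the field value 2 is just an example):

    #include <stdio.h>

    #define BIT(nr) (1UL << (nr))

    int main(void)
    {
            unsigned long field = 2;  /* example register field value */
            unsigned long bytes = (64 * 1024) * BIT(field);

            printf("flash size: %lu KB\n", bytes / 1024);  /* 256 KB */
            return 0;
    }
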
 
@@ -1462,20 +1470,11 @@ static int i40e_get_ts_info(struct net_device *dev,
        else
                info->phc_index = -1;
 
-       info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
-       info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                          (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
        return 0;
 }
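
The shortened rx_filters list relies on the *_EVENT filters being catch-alls: HWTSTAMP_FILTER_PTP_V1_L4_EVENT covers the v1 SYNC and DELAY_REQ entries that were dropped, and HWTSTAMP_FILTER_PTP_V2_EVENT covers the v2 transport/message combinations, so the two event filters advertise the same hardware capability. Spelled out with the uapi enums (BIT() re-created locally for the sketch):

    #include <linux/net_tstamp.h>

    #define BIT(nr) (1UL << (nr))

    /* Same composition as the rewritten i40e_get_ts_info() above. */
    static unsigned int demo_rx_filters(void)
    {
            return BIT(HWTSTAMP_FILTER_NONE) |
                   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
                   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
    }
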
@@ -1591,7 +1590,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        /* indicate we're in test mode */
                        dev_close(netdev);
                else
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                /* Link test performed before hardware reset
                 * so autoneg doesn't interfere with test result
@@ -1613,7 +1612,7 @@ static void i40e_diag_test(struct net_device *netdev,
                        eth_test->flags |= ETH_TEST_FL_FAILED;
 
                clear_bit(__I40E_TESTING, &pf->state);
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
 
                if (if_running)
                        dev_open(netdev);
@@ -1646,7 +1645,7 @@ static void i40e_get_wol(struct net_device *netdev,
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1) {
+       if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) {
                wol->supported = 0;
                wol->wolopts = 0;
        } else {
@@ -1679,7 +1678,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
-       if (((1 << hw->port) & wol_nvm_bits))
+       if (BIT(hw->port) & wol_nvm_bits)
                return -EOPNOTSUPP;
 
        /* only magic packet is supported */
@@ -2025,10 +2024,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2037,10 +2036,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -2049,12 +2048,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -2063,12 +2062,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -2081,7 +2080,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
                break;
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -2090,15 +2089,15 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
                break;
        case IPV4_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4);
                break;
        case IPV6_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6);
                break;
        default:
                return -EINVAL;
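
The RSS hash-enable (hena) value is a 64-bit register and several I40E_FILTER_PCTYPE_* indices are 32 or higher, so a plain int shift like (1 << pctype) would be undefined; these hunks therefore pick BIT_ULL(), while 32-bit-safe sites use BIT() (the FCoE pctypes in the next file are rebased by -32 into the second 32-bit register half). A standalone sketch of why the width matters:

    #include <assert.h>
    #include <stdint.h>

    #define BIT_ULL(nr) (1ULL << (nr))

    int main(void)
    {
            uint64_t hena = 0;
            int pctype = 41;  /* example pctype index in the high word */

            /* (1 << 41) in 32-bit int arithmetic is undefined behaviour;
             * BIT_ULL() forces the shift to happen in 64 bits.
             */
            hena |= BIT_ULL(pctype);
            assert(hena >> 32);  /* the bit landed in the high word */
            return 0;
    }
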
index c8b621e0e7cda622c5a0fa9e795a898e53886cf5..5ea75dd537d62f6e9da5545af1f597071b4aa6bd 100644 (file)
@@ -298,8 +298,8 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
 
        /* enable FCoE hash filter */
        val = rd32(hw, I40E_PFQF_HENA(1));
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
-       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
+       val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
        val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
        wr32(hw, I40E_PFQF_HENA(1), val);
 
@@ -308,10 +308,10 @@ int i40e_init_pf_fcoe(struct i40e_pf *pf)
        pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
 
        /* Reserve 4K DDP contexts and 20K filter size for FCoE */
-       pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
-                                I40E_DMA_CNTX_BASE_SIZE;
+       pf->fcoe_hmc_cntx_num = BIT(I40E_DMA_CNTX_SIZE_4K) *
+                               I40E_DMA_CNTX_BASE_SIZE;
        pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
-                               (1 << I40E_HASH_FILTER_SIZE_16K) *
+                               BIT(I40E_HASH_FILTER_SIZE_16K) *
                                I40E_HASH_FILTER_BASE_SIZE;
 
        /* FCoE object: max 16K filter buckets and 4K DMA contexts */
@@ -348,7 +348,7 @@ u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
                if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
                    app.protocolid == ETH_P_FCOE) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT(tc);
                        break;
                }
        }
index 0d49e2d15d408c671c3acf581b10df5763fee7c3..a93174ddeaba747aa6a9576b53c8700ef3202dc2 100644 (file)
@@ -59,9 +59,9 @@
        (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
 
 #define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
 #define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT   \
-       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+       BIT(I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
 
 #define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)    \
        I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
index 9b987ccc9e828738caef1c4811c5db1ca33b388d..5ebe12d56ebf422b6273d0e810be8bbf113ced04 100644 (file)
@@ -116,6 +116,7 @@ exit:
  * @hw: pointer to our HW structure
  * @hmc_info: pointer to the HMC configuration information structure
  * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use this preallocated page instead of allocating a new one.
  *
  * This function:
  *     1. Initializes the pd entry
@@ -129,12 +130,14 @@ exit:
  **/
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index)
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg)
 {
        i40e_status ret_code = 0;
        struct i40e_hmc_pd_table *pd_table;
        struct i40e_hmc_pd_entry *pd_entry;
        struct i40e_dma_mem mem;
+       struct i40e_dma_mem *page = &mem;
        u32 sd_idx, rel_pd_idx;
        u64 *pd_addr;
        u64 page_desc;
@@ -155,18 +158,24 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
        pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
        pd_entry = &pd_table->pd_entry[rel_pd_idx];
        if (!pd_entry->valid) {
-               /* allocate a 4K backing page */
-               ret_code = i40e_allocate_dma_mem(hw, &mem, i40e_mem_bp,
-                                                I40E_HMC_PAGED_BP_SIZE,
-                                                I40E_HMC_PD_BP_BUF_ALIGNMENT);
-               if (ret_code)
-                       goto exit;
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       /* allocate a 4K backing page */
+                       ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+                                               I40E_HMC_PAGED_BP_SIZE,
+                                               I40E_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               goto exit;
+                       pd_entry->rsrc_pg = false;
+               }
 
-               pd_entry->bp.addr = mem;
+               pd_entry->bp.addr = *page;
                pd_entry->bp.sd_pd_index = pd_index;
                pd_entry->bp.entry_type = I40E_SD_TYPE_PAGED;
                /* Set page address and valid bit */
-               page_desc = mem.pa | 0x1;
+               page_desc = page->pa | 0x1;
 
                pd_addr = (u64 *)pd_table->pd_page_addr.va;
                pd_addr += rel_pd_idx;
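
The new rsrc_pg argument lets the caller hand in a preallocated backing page, and pd_entry->rsrc_pg records who owns the memory so that the teardown path (next hunk) frees only pages this function allocated itself. A minimal userspace model of that borrowed-versus-owned pattern; the struct and names are stand-ins for the driver's:

    #include <stdlib.h>

    struct entry {
            void *page;
            int   rsrc_pg;  /* 1: caller-owned (borrowed), 0: ours to free */
    };

    static int add_entry(struct entry *e, void *rsrc_pg, size_t sz)
    {
            if (rsrc_pg) {          /* use the caller's page */
                    e->page = rsrc_pg;
                    e->rsrc_pg = 1;
            } else {                /* allocate and remember we own it */
                    e->page = malloc(sz);
                    if (!e->page)
                            return -1;
                    e->rsrc_pg = 0;
            }
            return 0;
    }

    static void remove_entry(struct entry *e)
    {
            if (!e->rsrc_pg)        /* free only what we allocated */
                    free(e->page);
            e->page = NULL;
    }
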
@@ -240,7 +249,8 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
        I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
 
        /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
+       if (!pd_entry->rsrc_pg)
+               ret_code = i40e_free_dma_mem(hw, &pd_entry->bp.addr);
        if (ret_code)
                goto exit;
        if (!pd_table->ref_cnt)
@@ -287,21 +297,15 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
                                            u32 idx, bool is_pf)
 {
        struct i40e_hmc_sd_entry *sd_entry;
-       i40e_status ret_code = 0;
+
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
 
        /* get the entry and decrease its ref counter */
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.bp.addr));
-       if (ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_DIRECT);
+
+       return i40e_free_dma_mem(hw, &sd_entry->u.bp.addr);
 }
 
 /**
@@ -341,20 +345,13 @@ i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
                                              u32 idx, bool is_pf)
 {
-       i40e_status ret_code = 0;
        struct i40e_hmc_sd_entry *sd_entry;
 
+       if (!is_pf)
+               return I40E_NOT_SUPPORTED;
+
        sd_entry = &hmc_info->sd_table.sd_entry[idx];
-       if (is_pf) {
-               I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
-       } else {
-               ret_code = I40E_NOT_SUPPORTED;
-               goto exit;
-       }
-       /* free memory here */
-       ret_code = i40e_free_dma_mem(hw, &(sd_entry->u.pd_table.pd_page_addr));
-       if (ret_code)
-               goto exit;
-exit:
-       return ret_code;
+       I40E_CLEAR_PF_SD_ENTRY(hw, idx, I40E_SD_TYPE_PAGED);
+
+       return i40e_free_dma_mem(hw, &sd_entry->u.pd_table.pd_page_addr);
 }
index 732a02660330664ad7e65a2a78195a28bd4339fd..d906692113929e412df7806fd8795922586c9f78 100644 (file)
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
                 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
-               (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
index 0079ad7bcd0e1ff9c5fb985322a72d635418df14..fa371a2a40c6817e6f9f1dd8ff8b924ff3b08153 100644 (file)
@@ -129,7 +129,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
        obj->cnt = txq_num;
        obj->base = 0;
        size_exp = rd32(hw, I40E_GLHMC_LANTXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (txq_num > obj->max_cnt) {
@@ -152,7 +152,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_TX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_LANRXOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (rxq_num > obj->max_cnt) {
@@ -175,7 +175,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_LAN_RX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEDDPOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_cntx_num > obj->max_cnt) {
@@ -198,7 +198,7 @@ i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
                     hw->hmc.hmc_obj[I40E_HMC_FCOE_CTX].size);
        obj->base = i40e_align_l2obj_base(obj->base);
        size_exp = rd32(hw, I40E_GLHMC_FCOEFOBJSZ);
-       obj->size = (u64)1 << size_exp;
+       obj->size = BIT_ULL(size_exp);
 
        /* validate values requested by driver don't exceed HMC capacity */
        if (fcoe_filt_num > obj->max_cnt) {
@@ -387,7 +387,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                                /* update the pd table entry */
                                ret_code = i40e_add_pd_table_entry(hw,
                                                                info->hmc_info,
-                                                               i);
+                                                               i, NULL);
                                if (ret_code) {
                                        pd_error = true;
                                        break;
@@ -763,7 +763,7 @@ static void i40e_write_byte(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u8)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        src_byte = *from;
        src_byte &= mask;
@@ -804,7 +804,7 @@ static void i40e_write_word(u8 *hmc_bits,
 
        /* prepare the bits and mask */
        shift_width = ce_info->lsb % 8;
-       mask = ((u16)1 << ce_info->width) - 1;
+       mask = BIT(ce_info->width) - 1;
 
        /* don't swizzle the bits until after the mask because the mask bits
         * will be in a different bit position on big endian machines
@@ -854,7 +854,7 @@ static void i40e_write_dword(u8 *hmc_bits,
         * to 5 bits so the shift will do nothing
         */
        if (ce_info->width < 32)
-               mask = ((u32)1 << ce_info->width) - 1;
+               mask = BIT(ce_info->width) - 1;
        else
                mask = ~(u32)0;
 
@@ -906,7 +906,7 @@ static void i40e_write_qword(u8 *hmc_bits,
         * to 6 bits so the shift will do nothing
         */
        if (ce_info->width < 64)
-               mask = ((u64)1 << ce_info->width) - 1;
+               mask = BIT_ULL(ce_info->width) - 1;
        else
                mask = ~(u64)0;
 
index 48a52b35b61427c436b37ed070e998c7483150fb..857d294d2a453c3097a3ead5c7a5fa27df001ae4 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 4
+#define DRV_VERSION_BUILD 6
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -520,7 +520,7 @@ static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
        if (likely(new_data >= *offset))
                *stat = new_data - *offset;
        else
-               *stat = (new_data + ((u64)1 << 48)) - *offset;
+               *stat = (new_data + BIT_ULL(48)) - *offset;
        *stat &= 0xFFFFFFFFFFFFULL;
 }
 
@@ -543,7 +543,7 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
        if (likely(new_data >= *offset))
                *stat = (u32)(new_data - *offset);
        else
-               *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+               *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
 }
 
 /**
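
i40e_stat_update48() reads hardware counters that are only 48 bits wide, so when the raw reading has wrapped below the saved offset, adding BIT_ULL(48) restores the true delta before masking back to 48 bits; the 32-bit variant is handled the same way with BIT_ULL(32). A worked sketch of the wrap arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define BIT_ULL(nr) (1ULL << (nr))

    /* Delta of a 48-bit counter against a saved offset, tolerating
     * one wrap-around, as in i40e_stat_update48().
     */
    static uint64_t stat_update48(uint64_t new_data, uint64_t offset)
    {
            uint64_t stat;

            if (new_data >= offset)
                    stat = new_data - offset;
            else
                    stat = (new_data + BIT_ULL(48)) - offset;
            return stat & 0xFFFFFFFFFFFFULL;
    }

    int main(void)
    {
            /* counter wrapped: was 0xFFFFFFFFFFF0, now reads 0x10 */
            assert(stat_update48(0x10, 0xFFFFFFFFFFF0ULL) == 0x20);
            return 0;
    }
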
@@ -1123,6 +1123,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           pf->stat_offsets_loaded,
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
+       if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
+           !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+               nsd->fd_sb_status = true;
+       else
+               nsd->fd_sb_status = false;
+
+       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
+           !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+               nsd->fd_atr_status = true;
+       else
+               nsd->fd_atr_status = false;
+
        pf->stat_offsets_loaded = true;
 }
 
@@ -1264,7 +1276,7 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
 {
        struct i40e_aqc_remove_macvlan_element_data element;
        struct i40e_pf *pf = vsi->back;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        /* Only appropriate for the PF main VSI */
        if (vsi->type != I40E_VSI_MAIN)
@@ -1275,8 +1287,8 @@ static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
        element.vlan_tag = 0;
        element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
                        I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
-       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
-       if (aq_ret)
+       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+       if (ret)
                return -ENOENT;
 
        return 0;
@@ -1514,7 +1526,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
                /* Find numtc from enabled TC bitmap */
                for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-                       if (enabled_tc & (1 << i)) /* TC is enabled */
+                       if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
                                numtc++;
                }
                if (!numtc) {
@@ -1540,7 +1552,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
        /* Setup queue offset/count for all TCs for given VSI */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
                /* See if the given TC is enabled for the given VSI */
-               if (vsi->tc_config.enabled_tc & (1 << i)) { /* TC is enabled */
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
+                       /* TC is enabled */
                        int pow, num_qps;
 
                        switch (vsi->type) {
@@ -1566,7 +1579,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        /* find the next higher power-of-2 of num queue pairs */
                        num_qps = qcount;
                        pow = 0;
-                       while (num_qps && ((1 << pow) < qcount)) {
+                       while (num_qps && (BIT_ULL(pow) < qcount)) {
                                pow++;
                                num_qps >>= 1;
                        }
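
The rewritten loop is the driver's round-up-to-a-power-of-two step: pow counts doublings until BIT_ULL(pow) reaches qcount, and the queue map is then built from that exponent. The same computation, standalone:

    #include <assert.h>

    #define BIT_ULL(nr) (1ULL << (nr))

    /* Exponent of the next power of two >= qcount, matching the loop
     * in i40e_vsi_setup_queue_map().
     */
    static int next_pow2_exp(unsigned int qcount)
    {
            unsigned int num_qps = qcount;
            int pow = 0;

            while (num_qps && (BIT_ULL(pow) < qcount)) {
                    pow++;
                    num_qps >>= 1;
            }
            return pow;
    }

    int main(void)
    {
            assert(next_pow2_exp(5) == 3);  /* 5 -> 8 queue pairs */
            assert(next_pow2_exp(8) == 3);
            return 0;
    }
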
@@ -1716,10 +1729,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        bool add_happened = false;
        int filter_list_len = 0;
        u32 changed_flags = 0;
-       i40e_status aq_ret = 0;
+       i40e_status ret = 0;
        struct i40e_pf *pf;
        int num_add = 0;
        int num_del = 0;
+       int aq_err = 0;
        u16 cmd_flags;
 
        /* empty array typed pointers, kcalloc later */
@@ -1771,31 +1785,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_del == filter_list_len) {
-                               aq_ret = i40e_aq_remove_macvlan(&pf->hw,
-                                           vsi->seid, del_list, num_del,
-                                           NULL);
+                               ret = i40e_aq_remove_macvlan(&pf->hw,
+                                                 vsi->seid, del_list, num_del,
+                                                 NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_del = 0;
                                memset(del_list, 0, sizeof(*del_list));
 
-                               if (aq_ret &&
-                                   pf->hw.aq.asq_last_status !=
-                                                             I40E_AQ_RC_ENOENT)
+                               if (ret && aq_err != I40E_AQ_RC_ENOENT)
                                        dev_info(&pf->pdev->dev,
-                                                "ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
-                                                aq_ret,
-                                                pf->hw.aq.asq_last_status);
+                                                "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
+                                                i40e_stat_str(&pf->hw, ret),
+                                                i40e_aq_str(&pf->hw, aq_err));
                        }
                }
                if (num_del) {
-                       aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
+                       ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
                                                     del_list, num_del, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_del = 0;
 
-                       if (aq_ret &&
-                           pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
+                       if (ret && aq_err != I40E_AQ_RC_ENOENT)
                                dev_info(&pf->pdev->dev,
-                                        "ignoring delete macvlan error, err %d, aq_err %d\n",
-                                        aq_ret, pf->hw.aq.asq_last_status);
+                                        "ignoring delete macvlan error, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw, aq_err));
                }
 
                kfree(del_list);
@@ -1833,29 +1847,31 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 
                        /* flush a full buffer */
                        if (num_add == filter_list_len) {
-                               aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                            add_list, num_add,
-                                                            NULL);
+                               ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                         add_list, num_add,
+                                                         NULL);
+                               aq_err = pf->hw.aq.asq_last_status;
                                num_add = 0;
 
-                               if (aq_ret)
+                               if (ret)
                                        break;
                                memset(add_list, 0, sizeof(*add_list));
                        }
                }
                if (num_add) {
-                       aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
-                                                    add_list, num_add, NULL);
+                       ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+                                                 add_list, num_add, NULL);
+                       aq_err = pf->hw.aq.asq_last_status;
                        num_add = 0;
                }
                kfree(add_list);
                add_list = NULL;
 
-               if (add_happened && aq_ret &&
-                   pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) {
+               if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
                        dev_info(&pf->pdev->dev,
-                                "add filter failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "add filter failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw, aq_err));
                        if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
                            !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                      &vsi->state)) {
@@ -1871,34 +1887,40 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
-               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
-                                                              vsi->seid,
-                                                              cur_multipromisc,
-                                                              NULL);
-               if (aq_ret)
+               ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
+                                                           vsi->seid,
+                                                           cur_multipromisc,
+                                                           NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set multi promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set multi promisc failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
-               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
-                                                            vsi->seid,
-                                                            cur_promisc, NULL);
-               if (aq_ret)
+               ret = i40e_aq_set_vsi_unicast_promiscuous(&vsi->back->hw,
+                                                         vsi->seid,
+                                                         cur_promisc, NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set uni promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
-               if (aq_ret)
+                                "set uni promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+               ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
+                                               vsi->seid,
+                                               cur_promisc, NULL);
+               if (ret)
                        dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %d, aq_err %d\n",
-                                aq_ret, pf->hw.aq.asq_last_status);
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
 
        clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
@@ -1994,8 +2016,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2023,8 +2047,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vlan stripping failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
        }
 }
 
@@ -2294,7 +2320,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 {
        struct i40e_vsi_context ctxt;
-       i40e_status aq_ret;
+       i40e_status ret;
 
        vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
        vsi->info.pvid = cpu_to_le16(vid);
@@ -2304,11 +2330,13 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
 
        ctxt.seid = vsi->seid;
        ctxt.info = vsi->info;
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "%s: update vsi failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "add pvid failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                return -ENOENT;
        }
 
@@ -2696,9 +2724,9 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 #endif /* I40E_FCOE */
        /* round up for the chip's needs */
        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
-                               (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
        vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-                               (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
+                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
@@ -2728,7 +2756,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
        }
 
        for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
-               if (!(vsi->tc_config.enabled_tc & (1 << n)))
+               if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
                        continue;
 
                qoffset = vsi->tc_config.tc_info[n].qoffset;
@@ -4073,7 +4101,7 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
                if (app.selector == I40E_APP_SEL_TCPIP &&
                    app.protocolid == I40E_APP_PROTOID_ISCSI) {
                        tc = dcbcfg->etscfg.prioritytable[app.priority];
-                       enabled_tc |= (1 << tc);
+                       enabled_tc |= BIT_ULL(tc);
                        break;
                }
        }
@@ -4122,7 +4150,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
        u8 i;
 
        for (i = 0; i < num_tc; i++)
-               enabled_tc |= 1 << i;
+               enabled_tc |= BIT(i);
 
        return enabled_tc;
 }
@@ -4157,7 +4185,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
        /* At least have TC0 */
        enabled_tc = (enabled_tc ? enabled_tc : 0x1);
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        num_tc++;
        }
        return num_tc;
@@ -4179,11 +4207,11 @@ static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
 
        /* Find the first enabled TC */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        break;
        }
 
-       return 1 << i;
+       return BIT(i);
 }
 
 /**
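
i40e_pf_get_default_tc() returns the first enabled traffic class as a one-bit map, which is just the lowest set bit of enabled_tc; the loop-plus-BIT(i) form above and the two's-complement idiom below compute the same thing. A sketch (demo function, not driver code):

    #include <assert.h>

    #define BIT(nr) (1UL << (nr))

    /* Lowest set bit of the TC bitmap; x & -x isolates it in one step. */
    static unsigned long default_tc(unsigned long enabled_tc)
    {
            enabled_tc = enabled_tc ? enabled_tc : 0x1;  /* at least TC0 */
            return enabled_tc & -enabled_tc;
    }

    int main(void)
    {
            assert(default_tc(0x0) == BIT(0));
            assert(default_tc(0x6) == BIT(1));  /* TCs 1 and 2 enabled */
            return 0;
    }
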
@@ -4221,26 +4249,28 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
        struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       i40e_status aq_ret;
+       i40e_status ret;
        u32 tc_bw_max;
        int i;
 
        /* Get the VSI level BW configuration */
-       aq_ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
        /* Get the VSI level BW configuration per TC */
-       aq_ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
-                                                 NULL);
-       if (aq_ret) {
+       ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
+                                              NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get PF vsi ets bw config, err %d, aq_err %d\n",
-                        aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EINVAL;
        }
 
@@ -4279,16 +4309,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
-       i40e_status aq_ret;
+       i40e_status ret;
        int i;
 
        bw_data.tc_valid_bits = enabled_tc;
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       aq_ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-                                         NULL);
-       if (aq_ret) {
+       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
+                                      NULL);
+       if (ret) {
                dev_info(&vsi->back->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
                         vsi->back->hw.aq.asq_last_status);
@@ -4337,7 +4367,7 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
                 * will set the numtc for netdev as 2 that will be
                 * referenced by the netdev layer as TC 0 and 1.
                 */
-               if (vsi->tc_config.enabled_tc & (1 << i))
+               if (vsi->tc_config.enabled_tc & BIT_ULL(i))
                        netdev_set_tc_queue(netdev,
                                        vsi->tc_config.tc_info[i].netdev_tc,
                                        vsi->tc_config.tc_info[i].qcount,
@@ -4399,7 +4429,7 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now across all VSIs */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_share[i] = 1;
        }
 
@@ -4423,8 +4453,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "update vsi failed, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Update vsi tc config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                goto out;
        }
        /* update the local VSI info with updated queue map */
@@ -4435,8 +4467,10 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&vsi->back->pdev->dev,
-                        "Failed updating vsi bw info, aq_err=%d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        "Failed updating vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&vsi->back->hw, ret),
+                        i40e_aq_str(&vsi->back->hw,
+                                    vsi->back->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4469,7 +4503,7 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
 
        /* Enable ETS TCs with equal BW Share for now */
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
-               if (enabled_tc & (1 << i))
+               if (enabled_tc & BIT_ULL(i))
                        bw_data.tc_bw_share_credits[i] = 1;
        }
 
@@ -4477,8 +4511,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
                                                   &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "veb bw config failed, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "VEB bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto out;
        }
 
@@ -4486,8 +4521,9 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Failed getting veb bw config, aq_err=%d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Failed getting veb bw config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4574,8 +4610,9 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
        ret = i40e_aq_resume_port_tx(hw, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "AQ command Resume Port Tx failed = %d\n",
-                         pf->hw.aq.asq_last_status);
+                        "Resume Port Tx failed, err %s aq_err %s\n",
+                         i40e_stat_str(&pf->hw, ret),
+                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* Schedule PF reset to recover */
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                i40e_service_event_schedule(pf);
@@ -4627,8 +4664,9 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                }
        } else {
                dev_info(&pf->pdev->dev,
-                        "AQ Querying DCB configuration failed: aq_err %d\n",
-                        pf->hw.aq.asq_last_status);
+                        "Query for DCB configuration failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 
 out:
@@ -4859,7 +4897,7 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc)
 
        /* Generate TC map for number of tc requested */
        for (i = 0; i < tc; i++)
-               enabled_tc |= (1 << i);
+               enabled_tc |= BIT_ULL(i);
 
        /* Requesting same TC configuration as already enabled */
        if (enabled_tc == vsi->tc_config.enabled_tc)
@@ -4998,7 +5036,7 @@ err_setup_rx:
 err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        if (vsi == pf->vsi[pf->lan_vsi])
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return err;
 }
@@ -5066,7 +5104,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                i40e_vc_notify_reset(pf);
 
        /* do the biggest reset indicated */
-       if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
+       if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
 
                /* Request a Global Reset
                 *
@@ -5081,7 +5119,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
 
-       } else if (reset_flags & (1 << __I40E_CORE_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {
 
                /* Request a Core Reset
                 *
@@ -5093,7 +5131,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
                i40e_flush(&pf->hw);
 
-       } else if (reset_flags & (1 << __I40E_PF_RESET_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {
 
                /* Request a PF Reset
                 *
@@ -5106,7 +5144,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
                i40e_handle_reset_warning(pf);
 
-       } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that requested a re-init */
@@ -5123,7 +5161,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
 
                /* no further action needed, so return now */
                return;
-       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+       } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;
 
                /* Find the VSI(s) that needs to be brought down */
@@ -5253,7 +5291,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        /* Get updated DCBX data from firmware */
        ret = i40e_get_dcb_config(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n");
+               dev_info(&pf->pdev->dev,
+                        "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto exit;
        }
 
@@ -5761,23 +5802,23 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 
        rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_REINIT_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_PF_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
                clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_CORE_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
                clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
        if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
-               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
                clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
        }
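
i40e_reset_subtask() latches each pending request bit from pf->state into a local reset_flags word, clearing the request as it goes, and i40e_do_reset() (earlier hunks) then services only the most severe reset indicated: global, then core, then PF, then reinit, then down. A compact model of that collect-then-dispatch pattern; the DEMO_* names are stand-ins:

    #include <stdio.h>

    #define BIT_ULL(nr) (1ULL << (nr))

    enum { DEMO_GLOBAL, DEMO_CORE, DEMO_PF, DEMO_REINIT, DEMO_DOWN };

    /* Service only the biggest reset requested, as i40e_do_reset() does. */
    static void do_reset(unsigned long long flags)
    {
            if (flags & BIT_ULL(DEMO_GLOBAL))
                    puts("global reset");
            else if (flags & BIT_ULL(DEMO_CORE))
                    puts("core reset");
            else if (flags & BIT_ULL(DEMO_PF))
                    puts("PF reset");
            else if (flags & BIT_ULL(DEMO_REINIT))
                    puts("reinit requested VSIs");
            else if (flags & BIT_ULL(DEMO_DOWN))
                    puts("bring requested VSIs down");
    }

    int main(void)
    {
            do_reset(BIT_ULL(DEMO_PF) | BIT_ULL(DEMO_REINIT));  /* "PF reset" */
            return 0;
    }
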
 
@@ -5983,27 +6024,29 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6017,27 +6060,29 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
 {
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        struct i40e_vsi_context ctxt;
-       int aq_ret;
+       int ret;
 
        ctxt.seid = pf->main_vsi_seid;
        ctxt.pf_num = pf->hw.pf_id;
        ctxt.vf_num = 0;
-       aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s couldn't get PF vsi config, err %d, aq_err %d\n",
-                        __func__, aq_ret, pf->hw.aq.asq_last_status);
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return;
        }
        ctxt.flags = I40E_AQ_VSI_TYPE_PF;
        ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
        ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
-       aq_ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
-       if (aq_ret) {
+       ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+       if (ret) {
                dev_info(&pf->pdev->dev,
-                        "%s: update vsi switch failed, aq_err=%d\n",
-                        __func__, vsi->back->hw.aq.asq_last_status);
+                        "update vsi switch failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        }
 }
 
@@ -6097,7 +6142,8 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        ret = i40e_add_vsi(ctl_vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "rebuild of owner VSI failed: %d\n", ret);
+                        "rebuild of veb_idx %d owner VSI failed: %d\n",
+                        veb->idx, ret);
                goto end_reconstitute;
        }
        i40e_vsi_reset_stats(ctl_vsi);
@@ -6176,8 +6222,10 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
                        buf_len = data_size;
                } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
                        dev_info(&pf->pdev->dev,
-                                "capability discovery failed: aq=%d\n",
-                                pf->hw.aq.asq_last_status);
+                                "capability discovery failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        return -ENODEV;
                }
        } while (err);
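
Capability discovery retries with a larger buffer when firmware reports the needed size. A hedged outline of the loop around this hunk (buf, buf_len and list_type stand in for the driver's locals; the ENOMEM grow branch is abbreviated from the surrounding code):

    do {
            buf = kzalloc(buf_len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            err = i40e_aq_discover_capabilities(&pf->hw, buf, buf_len,
                                                &data_size, list_type, NULL);
            kfree(buf);
            if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM)
                    buf_len = data_size;            /* grow and retry */
            else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK)
                    return -ENODEV;
    } while (err);
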
@@ -6363,7 +6411,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
        ret = i40e_init_adminq(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret);
+               dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                goto clear_recovery;
        }
 
@@ -6373,11 +6423,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
 
        i40e_clear_pxe_mode(hw);
        ret = i40e_get_capabilities(pf);
-       if (ret) {
-               dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
-                        ret);
+       if (ret)
                goto end_core_reset;
-       }
 
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
                                hw->func_caps.num_rx_qp,
@@ -6418,12 +6465,16 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (ret)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
-               dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret);
+               dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
@@ -6484,8 +6535,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        /* reinit the misc interrupt */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -6647,8 +6700,8 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
        pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->pending_vxlan_bitmap & (1 << i)) {
-                       pf->pending_vxlan_bitmap &= ~(1 << i);
+               if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
+                       pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
                        port = pf->vxlan_ports[i];
                        if (port)
                                ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
@@ -6659,10 +6712,12 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
 
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "%s vxlan port %d, index %d failed, err %d, aq_err %d\n",
+                                        "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
                                         port ? "add" : "delete",
-                                        ntohs(port), i, ret,
-                                        pf->hw.aq.asq_last_status);
+                                        ntohs(port), i,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                pf->vxlan_ports[i] = 0;
                        }
                }
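
The BIT_ULL() conversions here are not cosmetic: pending_vxlan_bitmap is handled as a 64-bit mask, and a plain (1 << i) is an int shift, undefined for i >= 31 and sign-extended when widened. A minimal illustration, assuming linux/bitops.h:

    #include <linux/bitops.h>

    /* BIT_ULL(n) is (1ULL << (n)), well defined for any n up to 63. */
    u64 pending = 0;

    pending |= BIT_ULL(40);             /* set bit 40 */
    if (pending & BIT_ULL(40))          /* test it */
            pending &= ~BIT_ULL(40);    /* and clear it again */
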
@@ -7459,7 +7514,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
                        j = 0;
                /* lut = 4-byte sliding window of 4 lut entries */
                lut = (lut << 8) | (j &
-                        ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
+                        (BIT(pf->hw.func_caps.rss_table_entry_width) - 1));
                /* On i = 3, we have 4 entries in lut; write to the register */
                if ((i & 3) == 3)
                        wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
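
The loop this hunk touches packs four one-byte LUT entries into each 32-bit HLUT register, with BIT(width) - 1 building the entry mask. A hedged sketch of the packing; queue_for() and entry_width are illustrative names:

    u32 lut = 0;
    u32 i;

    for (i = 0; i < rss_table_size; i++) {
            /* mask the queue index to rss_table_entry_width bits */
            u8 entry = queue_for(i) & (BIT(entry_width) - 1);

            lut = (lut << 8) | entry;   /* 4-byte sliding window */
            if ((i & 3) == 3)           /* every fourth entry: flush */
                    wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
    }
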
@@ -7533,7 +7588,7 @@ i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
        i40e_status status;
 
        /* Set the valid bit for this PF */
-       bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id);
+       bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
        bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
        bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
 
@@ -7567,8 +7622,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for read access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for read access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7583,8 +7639,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        i40e_release_nvm(&pf->hw);
        if (ret) {
-               dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n",
-                        ret, last_aq_status);
+               dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
 
@@ -7596,8 +7653,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        last_aq_status = pf->hw.aq.asq_last_status;
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "Cannot acquire NVM for write access, err %d: aq_err %d\n",
-                        ret, last_aq_status);
+                        "Cannot acquire NVM for write access, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
                goto bw_commit_out;
        }
        /* Write it back out unchanged to initiate update NVM,
@@ -7615,8 +7673,9 @@ i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
        i40e_release_nvm(&pf->hw);
        if (ret)
                dev_info(&pf->pdev->dev,
-                        "BW settings NOT SAVED, err %d aq_err %d\n",
-                        ret, last_aq_status);
+                        "BW settings NOT SAVED, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, last_aq_status));
 bw_commit_out:
 
        return ret;
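
The NPAR bandwidth hunks above follow a strict acquire/operate/release discipline on the shared NVM resource. A hedged outline using the same helpers the hunks call; the module pointer and offset are illustrative:

    u16 nvm_word = 0;
    i40e_status ret;

    ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
    if (!ret) {
            ret = i40e_aq_read_nvm(&pf->hw, I40E_SR_NVM_CONTROL_WORD, 0,
                                   sizeof(nvm_word), &nvm_word,
                                   false, NULL);
            i40e_release_nvm(&pf->hw);  /* release even if the read failed */
    }
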
@@ -7662,7 +7721,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        /* Depending on PF configurations, it is possible that the RSS
         * maximum might end up larger than the available queues
         */
-       pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
        pf->rss_size = 1;
        pf->rss_table_size = pf->hw.func_caps.rss_table_size;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
@@ -7673,7 +7732,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
        }
 
        /* MFP mode enabled */
-       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) {
+       if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
                pf->flags |= I40E_FLAG_MFP_ENABLED;
                dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
                if (i40e_get_npar_bw_setting(pf))
@@ -7812,7 +7871,7 @@ static int i40e_set_features(struct net_device *netdev,
        need_reset = i40e_set_ntuple(pf, features);
 
        if (need_reset)
-               i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
 
        return 0;
 }
@@ -7875,7 +7934,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 
        /* New port: add it and mark its index in the bitmap */
        pf->vxlan_ports[next_idx] = port;
-       pf->pending_vxlan_bitmap |= (1 << next_idx);
+       pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
        dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port));
@@ -7906,7 +7965,7 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                 * and make it pending
                 */
                pf->vxlan_ports[idx] = 0;
-               pf->pending_vxlan_bitmap |= (1 << idx);
+               pf->pending_vxlan_bitmap |= BIT_ULL(idx);
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 
                dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
@@ -7981,7 +8040,6 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        return err;
 }
 
-#ifdef HAVE_BRIDGE_ATTRIBS
 /**
  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
  * @dev: the netdev being configured
@@ -7995,7 +8053,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  * bridge mode enabled.
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
-                                  struct nlmsghdr *nlh)
+                                  struct nlmsghdr *nlh,
+                                  u16 flags)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8066,14 +8125,9 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
  * Return the mode in which the hardware bridge is operating,
  * i.e. VEB or VEPA.
  **/
-#ifdef HAVE_BRIDGE_FILTER
 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
                                   u32 filter_mask, int nlflags)
-#else
-static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
-                                  struct net_device *dev, int nlflags)
-#endif /* HAVE_BRIDGE_FILTER */
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@ -8097,7 +8151,25 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
                                       nlflags, 0, 0, filter_mask, NULL);
 }
-#endif /* HAVE_BRIDGE_ATTRIBS */
+
+#define I40E_MAX_TUNNEL_HDR_LEN 80
+/**
+ * i40e_features_check - Validate encapsulated packet conforms to limits
+ * @skb: the skb being checked
+ * @dev: this physical port's netdev
+ * @features: Offload features that the stack believes apply
+ **/
+static netdev_features_t i40e_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       if (skb->encapsulation &&
+           (skb_inner_mac_header(skb) - skb_transport_header(skb) >
+            I40E_MAX_TUNNEL_HDR_LEN))
+               return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+       return features;
+}
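
i40e_features_check() measures the encapsulation headers as the span from the outer transport header to the inner MAC header; past 80 bytes the hardware cannot parse far enough in, so checksum and GSO offloads are masked off for that skb. The helper below is illustrative, not driver code:

    static unsigned int tunnel_hdr_len(const struct sk_buff *skb)
    {
            /* outer L4 header .. inner Ethernet header, in bytes */
            return skb_inner_mac_header(skb) - skb_transport_header(skb);
    }
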
 
 static const struct net_device_ops i40e_netdev_ops = {
        .ndo_open               = i40e_open,
@@ -8133,10 +8205,9 @@ static const struct net_device_ops i40e_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = i40e_get_phys_port_id,
        .ndo_fdb_add            = i40e_ndo_fdb_add,
-#ifdef HAVE_BRIDGE_ATTRIBS
+       .ndo_features_check     = i40e_features_check,
        .ndo_bridge_getlink     = i40e_ndo_bridge_getlink,
        .ndo_bridge_setlink     = i40e_ndo_bridge_setlink,
-#endif /* HAVE_BRIDGE_ATTRIBS */
 };
 
 /**
@@ -8304,8 +8375,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "couldn't get PF vsi config, err %d, aq_err %d\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "couldn't get PF vsi config, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        return -ENOENT;
                }
                vsi->info = ctxt.info;
@@ -8327,8 +8400,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "update vsi failed, aq_err=%d\n",
-                                        pf->hw.aq.asq_last_status);
+                                        "update vsi failed, err %s aq_err %s\n",
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                                goto err;
                        }
@@ -8345,9 +8420,11 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                        ret = i40e_vsi_config_tc(vsi, enabled_tc);
                        if (ret) {
                                dev_info(&pf->pdev->dev,
-                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %d, aq_err %d\n",
-                                        enabled_tc, ret,
-                                        pf->hw.aq.asq_last_status);
+                                        "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+                                        enabled_tc,
+                                        i40e_stat_str(&pf->hw, ret),
+                                        i40e_aq_str(&pf->hw,
+                                                   pf->hw.aq.asq_last_status));
                                ret = -ENOENT;
                        }
                }
@@ -8438,8 +8515,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
                if (ret) {
                        dev_info(&vsi->back->pdev->dev,
-                                "add vsi failed, aq_err=%d\n",
-                                vsi->back->hw.aq.asq_last_status);
+                                "add vsi failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        ret = -ENOENT;
                        goto err;
                }
@@ -8484,8 +8563,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        ret = i40e_vsi_get_bw_info(vsi);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't get vsi bw info, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't get vsi bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                /* VSI is already added so not tearing that up */
                ret = 0;
        }
@@ -8658,7 +8738,7 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
        ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
        if (ret < 0) {
                dev_info(&pf->pdev->dev,
-                        "failed to get tracking for %d queues for VSI %d err=%d\n",
+                        "failed to get tracking for %d queues for VSI %d err %d\n",
                         vsi->alloc_queue_pairs, vsi->seid, ret);
                goto err_vsi;
        }
@@ -8896,8 +8976,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                  &bw_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -8905,8 +8986,9 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
                                                   &ets_data, NULL);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "query veb bw ets config failed, aq_err=%d\n",
-                        hw->aq.asq_last_status);
+                        "query veb bw ets config failed, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
                goto out;
        }
 
@@ -9090,36 +9172,40 @@ void i40e_veb_release(struct i40e_veb *veb)
  **/
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
+       struct i40e_pf *pf = veb->pf;
        bool is_default = false;
        bool is_cloud = false;
        int ret;
 
        /* get a VEB from the hardware */
-       ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid,
+       ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                              veb->enabled_tc, is_default,
                              is_cloud, &veb->seid, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't add VEB, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't add VEB, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
 
        /* get statistics counter */
-       ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL,
+       ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
                                         &veb->stats_idx, NULL, NULL, NULL);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB statistics idx, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB statistics idx, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return -EPERM;
        }
        ret = i40e_veb_get_bw_info(veb);
        if (ret) {
-               dev_info(&veb->pf->pdev->dev,
-                        "couldn't get VEB bw info, err %d, aq_err %d\n",
-                        ret, veb->pf->hw.aq.asq_last_status);
-               i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL);
+               dev_info(&pf->pdev->dev,
+                        "couldn't get VEB bw info, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+               i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
                return -ENOENT;
        }
 
@@ -9325,8 +9411,10 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
                                                &next_seid, NULL);
                if (ret) {
                        dev_info(&pf->pdev->dev,
-                                "get switch config failed %d aq_err=%x\n",
-                                ret, pf->hw.aq.asq_last_status);
+                                "get switch config failed err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
                        kfree(aq_buf);
                        return -ENOENT;
                }
@@ -9367,8 +9455,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
        ret = i40e_fetch_switch_configuration(pf, false);
        if (ret) {
                dev_info(&pf->pdev->dev,
-                        "couldn't fetch switch config, err %d, aq_err %d\n",
-                        ret, pf->hw.aq.asq_last_status);
+                        "couldn't fetch switch config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, ret),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
                return ret;
        }
        i40e_pf_reset_stats(pf);
@@ -9743,7 +9832,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        err = i40e_init_shared_code(hw);
        if (err) {
-               dev_info(&pdev->dev, "init_shared_code failed: %d\n", err);
+               dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
+                        err);
                goto err_pf_reset;
        }
 
@@ -9910,15 +10000,19 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                       I40E_AQ_EVENT_LINK_UPDOWN |
                                       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err);
+               dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
        if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4)) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
-                       dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n",
-                                pf->hw.aq.asq_last_status);
+                       dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
        }
        /* The main driver is (mostly) up and happy. We need to set this state
         * before setting up the misc vector or we get a race and the vector
@@ -10006,8 +10100,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* get the requested speeds from the fw */
        err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
        if (err)
-               dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n",
-                        err);
+               dev_info(&pf->pdev->dev,
+                        "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
 
        /* print a string summarizing features */
index 554e49d02683c1783a56a2fa9235412e96bdcd3f..ce986af213d2847d9e2b41f86aa9812aad0054a3 100644
@@ -50,7 +50,7 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
        sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
                           I40E_GLNVM_GENS_SR_SIZE_SHIFT);
        /* Switching to words (sr_size contains power of 2KB) */
-       nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
+       nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
 
        /* Check if we are in the normal or blank NVM programming mode */
        fla = rd32(hw, I40E_GLNVM_FLA);
@@ -189,8 +189,8 @@ static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
        ret_code = i40e_poll_sr_srctl_done_bit(hw);
        if (!ret_code) {
                /* Write the address and start reading */
-               sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
-                        (1 << I40E_GLNVM_SRCTL_START_SHIFT);
+               sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
+                        BIT(I40E_GLNVM_SRCTL_START_SHIFT);
                wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
 
                /* Poll I40E_GLNVM_SRCTL until the done bit is set */
index 7b34f1e660eacf99699d411e3e498d6fdf85d37c..d52a9f7873b0c927a78accc7a25b23ab3867f153 100644
@@ -58,6 +58,8 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
 void i40e_idle_aq(struct i40e_hw *hw);
 bool i40e_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 u32 i40e_led_get(struct i40e_hw *hw);
 void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
index a92b7725dec3910964e5807a88d4f31622b55763..8c40d6ea15fda5cf4283769ab9d5753c67298917 100644
@@ -43,9 +43,8 @@
 #define I40E_PTP_10GB_INCVAL 0x0333333333ULL
 #define I40E_PTP_1GB_INCVAL  0x2000000000ULL
 
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  (0x1 << \
-                                       I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (0x2 << \
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V1  BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2  (2 << \
                                        I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 
 /**
@@ -357,7 +356,7 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index)
 
        prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
 
-       if (!(prttsyn_stat & (1 << index)))
+       if (!(prttsyn_stat & BIT(index)))
                return;
 
        lo = rd32(hw, I40E_PRTTSYN_RXTIME_L(index));
index 9a4f2bc70cd2cb5494576f5530a7447502cd3c91..330e4ef43cd8fafc9a5a8985b0b713e9aec93e49 100644
@@ -464,7 +464,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
        error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
                I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
 
-       if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
+       if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
                if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
                    (I40E_DEBUG_FD & pf->hw.debug_mask))
                        dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
@@ -509,8 +509,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                        dev_info(&pdev->dev,
                                "FD filter programming failed due to incorrect filter parameters\n");
                }
-       } else if (error ==
-                         (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
+       } else if (error == BIT(I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n",
                                 rx_desc->wb.qword0.hi_dword.fd_id);
@@ -892,7 +891,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
         *  20-1249MB/s bulk   (8000 ints/s)
         */
        bytes_per_int = rc->total_bytes / rc->itr;
-       switch (rc->itr) {
+       switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
@@ -905,9 +904,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        case I40E_BULK_LATENCY:
                if (bytes_per_int <= 20)
-                       rc->latency_range = I40E_LOW_LATENCY;
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       default:
+               if (bytes_per_int <= 20)
+                       new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+       rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
@@ -923,41 +927,13 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        }
 
-       if (new_itr != rc->itr) {
-               /* do an exponential smoothing */
-               new_itr = (10 * new_itr * rc->itr) /
-                         ((9 * new_itr) + rc->itr);
-               rc->itr = new_itr & I40E_MAX_ITR;
-       }
+       if (new_itr != rc->itr)
+               rc->itr = new_itr;
 
        rc->total_bytes = 0;
        rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-       u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-       struct i40e_hw *hw = &q_vector->vsi->back->hw;
-       u32 reg_addr;
-       u16 old_itr;
-
-       reg_addr = I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1);
-       old_itr = q_vector->rx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->rx);
-       if (old_itr != q_vector->rx.itr)
-               wr32(hw, reg_addr, q_vector->rx.itr);
-
-       reg_addr = I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1);
-       old_itr = q_vector->tx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->tx);
-       if (old_itr != q_vector->tx.itr)
-               wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
 /**
  * i40e_clean_programming_status - clean the programming status descriptor
  * @rx_ring: the rx ring that has this descriptor
@@ -1386,7 +1362,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                return;
 
        /* did the hardware decode the packet and checksum? */
-       if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
        /* both known and outer_ip must be set for the below code to work */
@@ -1401,25 +1377,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                ipv6 = true;
 
        if (ipv4 &&
-           (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-                        (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
                goto checksum_fail;
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
 
        /* there was some L4 error, count error and punt packet to the stack */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
 
        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
        /* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1543,7 +1519,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1584,8 +1560,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1637,7 +1613,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
@@ -1647,7 +1623,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
@@ -1669,7 +1645,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1730,7 +1706,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1753,7 +1729,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1771,13 +1747,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        rx_ring->rx_stats.non_eop_descs++;
                        continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
@@ -1802,7 +1778,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1826,6 +1802,68 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector whose ITR is updated before its interrupt is re-enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       u16 old_itr;
+       int vector;
+       u32 val;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               old_itr = q_vector->rx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->rx);
+               if (old_itr != q_vector->rx.itr) {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                       (I40E_RX_ITR <<
+                               I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                       (q_vector->rx.itr <<
+                               I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                       I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                       (I40E_ITR_NONE <<
+                               I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       } else {
+               i40e_irq_dynamic_enable(vsi,
+                                       q_vector->v_idx + vsi->base_vector);
+       }
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               old_itr = q_vector->tx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->tx);
+               if (old_itr != q_vector->tx.itr) {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_TX_ITR <<
+                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                               (q_vector->tx.itr <<
+                                  I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_ITR_NONE <<
+                                  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_PFINT_DYN_CTLN(q_vector->v_idx +
+                             vsi->base_vector - 1), val);
+       } else {
+               i40e_irq_dynamic_enable(vsi,
+                                       q_vector->v_idx + vsi->base_vector);
+       }
+}
+
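
i40e_update_enable_itr() composes the same DYN_CTLN value four times over; the shape is easier to see factored out. A hedged sketch (the helper is illustrative, the masks and I40E_ITR_NONE are the driver's own):

    static u32 dyn_ctln_val(bool new_interval, u16 itr, u8 itr_indx)
    {
            u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK;

            if (new_interval)   /* latch a fresh interval into RX/TX ITR */
                    val |= (itr_indx << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
                           (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
            else                /* re-enable without touching the intervals */
                    val |= I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
            return val;
    }
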
 /**
  * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1882,33 +1920,24 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
-       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-           ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               i40e_update_dynamic_itr(q_vector);
-
-       if (!test_bit(__I40E_DOWN, &vsi->state)) {
-               if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-                       i40e_irq_dynamic_enable(vsi,
-                                       q_vector->v_idx + vsi->base_vector);
-               } else {
-                       struct i40e_hw *hw = &vsi->back->hw;
-                       /* We re-enable the queue 0 cause, but
-                        * don't worry about dynamic_enable
-                        * because we left it on for the other
-                        * possible interrupts during napi
-                        */
-                       u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-                       qval |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-                       qval = rd32(hw, I40E_QINT_TQCTL(0));
-                       qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-                       wr32(hw, I40E_QINT_TQCTL(0), qval);
-
-                       i40e_irq_dynamic_enable_icr0(vsi->back);
-               }
+       if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+               i40e_update_enable_itr(vsi, q_vector);
+       } else { /* Legacy mode */
+               struct i40e_hw *hw = &vsi->back->hw;
+               /* We re-enable the queue 0 cause, but
+                * don't worry about dynamic_enable
+                * because we left it on for the other
+                * possible interrupts during napi
+                */
+               u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
+                          I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+
+               wr32(hw, I40E_QINT_RQCTL(0), qval);
+               qval = rd32(hw, I40E_QINT_TQCTL(0)) |
+                      I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+               wr32(hw, I40E_QINT_TQCTL(0), qval);
+               i40e_irq_dynamic_enable_icr0(vsi->back);
        }
-
        return 0;
 }
 
@@ -2616,6 +2645,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
 
        return;
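
The prefetchw() added in this hunk warms the next descriptor's cache line for writing whenever the tail bump is deferred (xmit_more): the very next transmit will fill tx_desc + 1. Schematically, with kick_hw as an illustrative stand-in for the real condition:

    #include <linux/prefetch.h>

    if (kick_hw)
            writel(i, tx_ring->tail);   /* notify hardware now */
    else
            prefetchw(tx_desc + 1);     /* next xmit writes this line */
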
 
index 0dc48dc9ca61922a4b11bd0b7624f07c153c603a..429833c47245faa6cd3ad7d1e757b9668c169b05 100644
@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,17 +129,17 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING  4
 
-#define I40E_TX_FLAGS_CSUM             (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO              (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4             (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
-#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
+#define I40E_TX_FLAGS_TSYN             BIT(8)
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
index 9a5a75b1e2bc053b50bec13adde2fd3aa4848595..a20128b82b62511ca55ec984d6587b3050761c82 100644
@@ -213,7 +213,17 @@ struct i40e_hw_capabilities {
        bool dcb;
        bool fcoe;
        bool iscsi; /* Indicates iSCSI enabled */
-       bool mfp_mode_1;
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -487,6 +497,7 @@ struct i40e_hw {
 
        /* debug mask */
        u32 debug_mask;
+       char err_str[16];
 };
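
The 16-byte err_str added to struct i40e_hw is the scratch buffer behind i40e_stat_str()/i40e_aq_str(): codes with no string literal are formatted into it numerically. A hedged sketch of that fallback, with the literal table abbreviated:

    char *example_stat_str(struct i40e_hw *hw, i40e_status stat_err)
    {
            switch (stat_err) {
            case 0:
                    return "OK";
            /* ... one case per known i40e_status code ... */
            default:
                    snprintf(hw->err_str, sizeof(hw->err_str),
                             "%d", stat_err);
                    return hw->err_str;
            }
    }
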
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -600,7 +611,7 @@ enum i40e_rx_desc_status_bits {
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT      0
-#define I40E_RXD_QW1_STATUS_MASK       (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
                                         << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -608,8 +619,8 @@ enum i40e_rx_desc_status_bits {
                                             I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK     (0x1UL << \
-                                        I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+                                   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
        I40E_RX_DESC_FLTSTAT_NO_DATA    = 0,
@@ -743,8 +754,7 @@ enum i40e_rx_ptype_payload_layer {
                                         I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK   (0x1ULL << \
-                                        I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
        /* Note: These are predefined bit offsets */
@@ -920,12 +930,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
 #define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING     (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        (0x1ULL << \
-                                        I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+                                      BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -990,8 +1000,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
-                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1009,8 +1019,7 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
-                                        I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
                                                 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1134,6 +1143,8 @@ struct i40e_hw_port_stats {
        u64 fd_atr_match;
        u64 fd_sb_match;
        u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 2d20af290fbf20bc9fc0dbdc88486fea61ec978f..a7ab463b44746a5f74f6d7339b060df9c235c4be 100644
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR            1
-#define I40E_VIRTCHNL_VERSION_MINOR            0
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
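
With the minor version bumped to 1, the PF must still answer 1.0 VFs in kind (see the VF_IS_V10 check in the i40e_vc_get_version_msg hunk below). A hedged sketch of the comparison, against the version info a VF sends:

    static inline bool vf_is_v11(const struct i40e_virtchnl_version_info *ver)
    {
            return ver->major == I40E_VIRTCHNL_VERSION_MAJOR &&
                   ver->minor == I40E_VIRTCHNL_VERSION_MINOR;
    }
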
 struct i40e_virtchnl_version_info {
        u32 major;
        u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
        u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2    0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE  0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN  0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
index 23f95cdbdfcc2c20d5913fbab3a2b71a1bb61064..d29d4062addf51141dbeefc152d1613b8171c981 100644
@@ -160,13 +160,8 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
  **/
 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       u32 reg;
-
-       reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
-       reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
-       wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
-       i40e_flush(hw);
+       i40e_vc_notify_vf_reset(vf);
+       i40e_reset_vf(vf, false);
 }
 
 /**
@@ -282,16 +277,14 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
        }
        tempmap = vecmap->rxq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-               linklistmap |= (1 <<
-                               (I40E_VIRTCHNL_SUPPORTED_QTYPES *
-                                vsi_queue_id));
+               linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+                                   vsi_queue_id));
        }
 
        tempmap = vecmap->txq_map;
        for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
-               linklistmap |= (1 <<
-                               (I40E_VIRTCHNL_SUPPORTED_QTYPES * vsi_queue_id
-                                + 1));
+               linklistmap |= (BIT(I40E_VIRTCHNL_SUPPORTED_QTYPES *
+                                    vsi_queue_id + 1));
        }
 
        next_q = find_first_bit(&linklistmap,
@@ -337,7 +330,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
                reg = (vector_id) |
                    (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
                    (pf_queue_id << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
-                   (1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+                   BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
                    (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
                wr32(hw, reg_idx, reg);
        }
@@ -542,11 +535,13 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
                if (vf->port_vlan_id)
                        i40e_vsi_add_pvid(vsi, vf->port_vlan_id);
                f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                   vf->port_vlan_id, true, false);
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
+                                   true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF MAC addr\n");
-               f = i40e_add_filter(vsi, brdcast, vf->port_vlan_id,
+               f = i40e_add_filter(vsi, brdcast,
+                                   vf->port_vlan_id ? vf->port_vlan_id : -1,
                                    true, false);
                if (!f)
                        dev_info(&pf->pdev->dev,
@@ -835,6 +830,7 @@ complete_reset:
        i40e_alloc_vf_res(vf);
        i40e_enable_vf_mappings(vf);
        set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
+       clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
 
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -899,7 +895,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
                for (vf_id = 0; vf_id < tmp; vf_id++) {
                        reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
                        bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
                }
        }
        clear_bit(__I40E_VF_DISABLE, &pf->state);
@@ -1123,12 +1119,16 @@ static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
  *
  * called from the VF to request the API version used by the PF
  **/
-static int i40e_vc_get_version_msg(struct i40e_vf *vf)
+static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_version_info info = {
                I40E_VIRTCHNL_VERSION_MAJOR, I40E_VIRTCHNL_VERSION_MINOR
        };
 
+       vf->vf_ver = *(struct i40e_virtchnl_version_info *)msg;
+       /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
+       if (VF_IS_V10(vf))
+               info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
        return i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
                                      I40E_SUCCESS, (u8 *)&info,
                                      sizeof(struct
@@ -1143,7 +1143,7 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf)
  *
  * called from the VF to request its resources
  **/
-static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
+static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
 {
        struct i40e_virtchnl_vf_resource *vfres = NULL;
        struct i40e_pf *pf = vf->pf;
@@ -1167,11 +1167,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf)
                len = 0;
                goto err;
        }
+       if (VF_IS_V11(vf))
+               vf->driver_caps = *(u32 *)msg;
+       else
+               vf->driver_caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+                                 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
 
        vfres->vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2;
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
-               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
+                                          I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
 
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
@@ -1773,9 +1780,14 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
                valid_len = sizeof(struct i40e_virtchnl_version_info);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
-       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
                valid_len = 0;
                break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+               if (VF_IS_V11(vf))
+                       valid_len = sizeof(u32);
+               else
+                       valid_len = 0;
+               break;
        case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
                valid_len = sizeof(struct i40e_virtchnl_txq_info);
                break;
@@ -1888,10 +1900,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
 
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_VERSION:
-               ret = i40e_vc_get_version_msg(vf);
+               ret = i40e_vc_get_version_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
-               ret = i40e_vc_get_vf_resources_msg(vf);
+               ret = i40e_vc_get_vf_resources_msg(vf, msg);
                break;
        case I40E_VIRTCHNL_OP_RESET_VF:
                i40e_vc_reset_vf_msg(vf);
@@ -1969,9 +1981,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
                /* read GLGEN_VFLRSTAT register to find out the flr VFs */
                vf = &pf->vf[vf_id];
                reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
-               if (reg & (1 << bit_idx)) {
+               if (reg & BIT(bit_idx)) {
                        /* clear the bit in GLGEN_VFLRSTAT */
-                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
+                       wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
 
                        if (!test_bit(__I40E_DOWN, &pf->state))
                                i40e_reset_vf(vf, true);
@@ -2023,7 +2035,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
 
        /* delete the temporary mac address */
-       i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+       i40e_del_filter(vsi, vf->default_lan_addr.addr,
+                       vf->port_vlan_id ? vf->port_vlan_id : -1,
                        true, false);
 
        /* Delete all the filters for this VSI - we're going to kill it
@@ -2088,6 +2101,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
                goto error_pvid;
        }
 
+       if (vsi->info.pvid == (vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT)))
+               /* duplicate request, so just return success */
+               goto error_pvid;
+
        if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
                dev_err(&pf->pdev->dev,
                        "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
index 09043c1aae5435109fcd2bab8b4059c52c105961..736f6f08b4f26c98042375db16767eefca0fbe66 100644 (file)
@@ -42,6 +42,9 @@
 #define I40E_VLAN_MASK                 0xFFF
 #define I40E_PRIORITY_MASK             0x7000
 
+#define VF_IS_V10(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 0))
+#define VF_IS_V11(_v) (((_v)->vf_ver.major == 1) && ((_v)->vf_ver.minor == 1))
+
 /* Various queue ctrls */
 enum i40e_queue_ctrl {
        I40E_QUEUE_CTRL_UNKNOWN = 0,
@@ -75,6 +78,8 @@ struct i40e_vf {
        u16 vf_id;
        /* all VF vsis connect to the same parent */
        enum i40e_switch_element_types parent_type;
+       struct i40e_virtchnl_version_info vf_ver;
+       u32 driver_caps; /* reported by VF driver */
 
        /* VF Port Extender (PE) stag if used */
        u16 stag;
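The VF_IS_V10()/VF_IS_V11() macros and the new vf_ver/driver_caps fields support the virtchnl version negotiation introduced above. The sketch below is only a model of the decision i40e_vc_get_version_msg() makes with them; the demo_* names are illustrative and not part of the driver:

        #include <stdint.h>

        /* Stand-in for the vf_ver field added to struct i40e_vf. */
        struct demo_vf {
                struct { uint32_t major, minor; } vf_ver;
        };

        /* A 1.0 VF must be answered with minor 0 (no capability
         * negotiation); a 1.1 VF gets the PF's full 1.1 version back.
         */
        static uint32_t demo_minor_to_report(const struct demo_vf *vf)
        {
                if (vf->vf_ver.major == 1 && vf->vf_ver.minor == 0)
                        return 0;
                return 1;
        }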
index e715bccfb5d256c055a99163a1fd0d177c190754..d5bd6f06692137819e557fc719df2f981253daa3 100644 (file)
@@ -34,7 +34,7 @@
  */
 
 #define I40E_FW_API_VERSION_MAJOR      0x0001
-#define I40E_FW_API_VERSION_MINOR      0x0002
+#define I40E_FW_API_VERSION_MINOR      0x0004
 #define I40E_FW_API_VERSION_A0_MINOR  0x0000
 
 struct i40e_aq_desc {
@@ -133,12 +133,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_list_func_capabilities     = 0x000A,
        i40e_aqc_opc_list_dev_capabilities      = 0x000B,
 
-       i40e_aqc_opc_set_cppm_configuration     = 0x0103,
-       i40e_aqc_opc_set_arp_proxy_entry        = 0x0104,
-       i40e_aqc_opc_set_ns_proxy_entry         = 0x0105,
-
        /* LAA */
-       i40e_aqc_opc_mng_laa            = 0x0106,   /* AQ obsolete */
        i40e_aqc_opc_mac_address_read   = 0x0107,
        i40e_aqc_opc_mac_address_write  = 0x0108,
 
@@ -260,7 +255,6 @@ enum i40e_admin_queue_opc {
        /* Tunnel commands */
        i40e_aqc_opc_add_udp_tunnel     = 0x0B00,
        i40e_aqc_opc_del_udp_tunnel     = 0x0B01,
-       i40e_aqc_opc_tunnel_key_structure       = 0x0B10,
 
        /* Async Events */
        i40e_aqc_opc_event_lan_overflow         = 0x1001,
@@ -272,8 +266,6 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_oem_ocbb_initialize        = 0xFE03,
 
        /* debug commands */
-       i40e_aqc_opc_debug_get_deviceid         = 0xFF00,
-       i40e_aqc_opc_debug_set_mode             = 0xFF01,
        i40e_aqc_opc_debug_read_reg             = 0xFF03,
        i40e_aqc_opc_debug_write_reg            = 0xFF04,
        i40e_aqc_opc_debug_modify_reg           = 0xFF07,
@@ -507,7 +499,8 @@ struct i40e_aqc_mac_address_read {
 #define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
 #define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_ADDR_VALID_MASK       0xf0
+#define I40E_AQC_MC_MAG_EN_VALID       0x100
+#define I40E_AQC_ADDR_VALID_MASK       0x1F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -530,7 +523,9 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
 #define I40E_AQC_WRITE_TYPE_PORT       0x8000
-#define I40E_AQC_WRITE_TYPE_MASK       0xc000
+#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
+#define I40E_AQC_WRITE_TYPE_MASK       0xC000
+
        __le16  mac_sah;
        __le32  mac_sal;
        u8      reserved[8];
@@ -1066,6 +1061,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
        __le16  seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
index 39fcb1dc4ea64d80b3601a3d62ff1052b22907c6..56c7e751149b0cba324c2722178705c42e4c908f 100644 (file)
@@ -71,6 +71,212 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
        return status;
 }
 
+/**
+ * i40evf_aq_str - convert AQ err code to a string
+ * @hw: pointer to the HW structure
+ * @aq_err: the AQ error code to convert
+ **/
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
+{
+       switch (aq_err) {
+       case I40E_AQ_RC_OK:
+               return "OK";
+       case I40E_AQ_RC_EPERM:
+               return "I40E_AQ_RC_EPERM";
+       case I40E_AQ_RC_ENOENT:
+               return "I40E_AQ_RC_ENOENT";
+       case I40E_AQ_RC_ESRCH:
+               return "I40E_AQ_RC_ESRCH";
+       case I40E_AQ_RC_EINTR:
+               return "I40E_AQ_RC_EINTR";
+       case I40E_AQ_RC_EIO:
+               return "I40E_AQ_RC_EIO";
+       case I40E_AQ_RC_ENXIO:
+               return "I40E_AQ_RC_ENXIO";
+       case I40E_AQ_RC_E2BIG:
+               return "I40E_AQ_RC_E2BIG";
+       case I40E_AQ_RC_EAGAIN:
+               return "I40E_AQ_RC_EAGAIN";
+       case I40E_AQ_RC_ENOMEM:
+               return "I40E_AQ_RC_ENOMEM";
+       case I40E_AQ_RC_EACCES:
+               return "I40E_AQ_RC_EACCES";
+       case I40E_AQ_RC_EFAULT:
+               return "I40E_AQ_RC_EFAULT";
+       case I40E_AQ_RC_EBUSY:
+               return "I40E_AQ_RC_EBUSY";
+       case I40E_AQ_RC_EEXIST:
+               return "I40E_AQ_RC_EEXIST";
+       case I40E_AQ_RC_EINVAL:
+               return "I40E_AQ_RC_EINVAL";
+       case I40E_AQ_RC_ENOTTY:
+               return "I40E_AQ_RC_ENOTTY";
+       case I40E_AQ_RC_ENOSPC:
+               return "I40E_AQ_RC_ENOSPC";
+       case I40E_AQ_RC_ENOSYS:
+               return "I40E_AQ_RC_ENOSYS";
+       case I40E_AQ_RC_ERANGE:
+               return "I40E_AQ_RC_ERANGE";
+       case I40E_AQ_RC_EFLUSHED:
+               return "I40E_AQ_RC_EFLUSHED";
+       case I40E_AQ_RC_BAD_ADDR:
+               return "I40E_AQ_RC_BAD_ADDR";
+       case I40E_AQ_RC_EMODE:
+               return "I40E_AQ_RC_EMODE";
+       case I40E_AQ_RC_EFBIG:
+               return "I40E_AQ_RC_EFBIG";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
+       return hw->err_str;
+}
+
+/**
+ * i40evf_stat_str - convert status err code to a string
+ * @hw: pointer to the HW structure
+ * @stat_err: the status error code to convert
+ **/
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err)
+{
+       switch (stat_err) {
+       case 0:
+               return "OK";
+       case I40E_ERR_NVM:
+               return "I40E_ERR_NVM";
+       case I40E_ERR_NVM_CHECKSUM:
+               return "I40E_ERR_NVM_CHECKSUM";
+       case I40E_ERR_PHY:
+               return "I40E_ERR_PHY";
+       case I40E_ERR_CONFIG:
+               return "I40E_ERR_CONFIG";
+       case I40E_ERR_PARAM:
+               return "I40E_ERR_PARAM";
+       case I40E_ERR_MAC_TYPE:
+               return "I40E_ERR_MAC_TYPE";
+       case I40E_ERR_UNKNOWN_PHY:
+               return "I40E_ERR_UNKNOWN_PHY";
+       case I40E_ERR_LINK_SETUP:
+               return "I40E_ERR_LINK_SETUP";
+       case I40E_ERR_ADAPTER_STOPPED:
+               return "I40E_ERR_ADAPTER_STOPPED";
+       case I40E_ERR_INVALID_MAC_ADDR:
+               return "I40E_ERR_INVALID_MAC_ADDR";
+       case I40E_ERR_DEVICE_NOT_SUPPORTED:
+               return "I40E_ERR_DEVICE_NOT_SUPPORTED";
+       case I40E_ERR_MASTER_REQUESTS_PENDING:
+               return "I40E_ERR_MASTER_REQUESTS_PENDING";
+       case I40E_ERR_INVALID_LINK_SETTINGS:
+               return "I40E_ERR_INVALID_LINK_SETTINGS";
+       case I40E_ERR_AUTONEG_NOT_COMPLETE:
+               return "I40E_ERR_AUTONEG_NOT_COMPLETE";
+       case I40E_ERR_RESET_FAILED:
+               return "I40E_ERR_RESET_FAILED";
+       case I40E_ERR_SWFW_SYNC:
+               return "I40E_ERR_SWFW_SYNC";
+       case I40E_ERR_NO_AVAILABLE_VSI:
+               return "I40E_ERR_NO_AVAILABLE_VSI";
+       case I40E_ERR_NO_MEMORY:
+               return "I40E_ERR_NO_MEMORY";
+       case I40E_ERR_BAD_PTR:
+               return "I40E_ERR_BAD_PTR";
+       case I40E_ERR_RING_FULL:
+               return "I40E_ERR_RING_FULL";
+       case I40E_ERR_INVALID_PD_ID:
+               return "I40E_ERR_INVALID_PD_ID";
+       case I40E_ERR_INVALID_QP_ID:
+               return "I40E_ERR_INVALID_QP_ID";
+       case I40E_ERR_INVALID_CQ_ID:
+               return "I40E_ERR_INVALID_CQ_ID";
+       case I40E_ERR_INVALID_CEQ_ID:
+               return "I40E_ERR_INVALID_CEQ_ID";
+       case I40E_ERR_INVALID_AEQ_ID:
+               return "I40E_ERR_INVALID_AEQ_ID";
+       case I40E_ERR_INVALID_SIZE:
+               return "I40E_ERR_INVALID_SIZE";
+       case I40E_ERR_INVALID_ARP_INDEX:
+               return "I40E_ERR_INVALID_ARP_INDEX";
+       case I40E_ERR_INVALID_FPM_FUNC_ID:
+               return "I40E_ERR_INVALID_FPM_FUNC_ID";
+       case I40E_ERR_QP_INVALID_MSG_SIZE:
+               return "I40E_ERR_QP_INVALID_MSG_SIZE";
+       case I40E_ERR_QP_TOOMANY_WRS_POSTED:
+               return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
+       case I40E_ERR_INVALID_FRAG_COUNT:
+               return "I40E_ERR_INVALID_FRAG_COUNT";
+       case I40E_ERR_QUEUE_EMPTY:
+               return "I40E_ERR_QUEUE_EMPTY";
+       case I40E_ERR_INVALID_ALIGNMENT:
+               return "I40E_ERR_INVALID_ALIGNMENT";
+       case I40E_ERR_FLUSHED_QUEUE:
+               return "I40E_ERR_FLUSHED_QUEUE";
+       case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
+               return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
+       case I40E_ERR_INVALID_IMM_DATA_SIZE:
+               return "I40E_ERR_INVALID_IMM_DATA_SIZE";
+       case I40E_ERR_TIMEOUT:
+               return "I40E_ERR_TIMEOUT";
+       case I40E_ERR_OPCODE_MISMATCH:
+               return "I40E_ERR_OPCODE_MISMATCH";
+       case I40E_ERR_CQP_COMPL_ERROR:
+               return "I40E_ERR_CQP_COMPL_ERROR";
+       case I40E_ERR_INVALID_VF_ID:
+               return "I40E_ERR_INVALID_VF_ID";
+       case I40E_ERR_INVALID_HMCFN_ID:
+               return "I40E_ERR_INVALID_HMCFN_ID";
+       case I40E_ERR_BACKING_PAGE_ERROR:
+               return "I40E_ERR_BACKING_PAGE_ERROR";
+       case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
+               return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
+       case I40E_ERR_INVALID_PBLE_INDEX:
+               return "I40E_ERR_INVALID_PBLE_INDEX";
+       case I40E_ERR_INVALID_SD_INDEX:
+               return "I40E_ERR_INVALID_SD_INDEX";
+       case I40E_ERR_INVALID_PAGE_DESC_INDEX:
+               return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
+       case I40E_ERR_INVALID_SD_TYPE:
+               return "I40E_ERR_INVALID_SD_TYPE";
+       case I40E_ERR_MEMCPY_FAILED:
+               return "I40E_ERR_MEMCPY_FAILED";
+       case I40E_ERR_INVALID_HMC_OBJ_INDEX:
+               return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
+       case I40E_ERR_INVALID_HMC_OBJ_COUNT:
+               return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
+       case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
+               return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
+       case I40E_ERR_SRQ_ENABLED:
+               return "I40E_ERR_SRQ_ENABLED";
+       case I40E_ERR_ADMIN_QUEUE_ERROR:
+               return "I40E_ERR_ADMIN_QUEUE_ERROR";
+       case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
+               return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
+       case I40E_ERR_BUF_TOO_SHORT:
+               return "I40E_ERR_BUF_TOO_SHORT";
+       case I40E_ERR_ADMIN_QUEUE_FULL:
+               return "I40E_ERR_ADMIN_QUEUE_FULL";
+       case I40E_ERR_ADMIN_QUEUE_NO_WORK:
+               return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
+       case I40E_ERR_BAD_IWARP_CQE:
+               return "I40E_ERR_BAD_IWARP_CQE";
+       case I40E_ERR_NVM_BLANK_MODE:
+               return "I40E_ERR_NVM_BLANK_MODE";
+       case I40E_ERR_NOT_IMPLEMENTED:
+               return "I40E_ERR_NOT_IMPLEMENTED";
+       case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
+               return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
+       case I40E_ERR_DIAG_TEST_FAILED:
+               return "I40E_ERR_DIAG_TEST_FAILED";
+       case I40E_ERR_NOT_READY:
+               return "I40E_ERR_NOT_READY";
+       case I40E_NOT_SUPPORTED:
+               return "I40E_NOT_SUPPORTED";
+       case I40E_ERR_FIRMWARE_API_VERSION:
+               return "I40E_ERR_FIRMWARE_API_VERSION";
+       }
+
+       snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
+       return hw->err_str;
+}
+
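The new i40evf_stat_str() and i40evf_aq_str() helpers turn driver status codes and admin-queue error codes into printable strings, falling back to the numeric value via hw->err_str. A hedged usage sketch follows; the wrapper function, its name, and the asq_last_status field reference are assumptions for illustration (and rely on the driver's own headers), not code from this patch:

        /* Illustrative only: report both the driver status and the last AQ
         * error for a failed admin-queue command via the new string helpers.
         */
        static void demo_report_aq_failure(struct device *dev,
                                           struct i40e_hw *hw,
                                           i40e_status ret)
        {
                dev_err(dev, "admin queue command failed: %s, aq_err %s\n",
                        i40evf_stat_str(hw, ret),
                        i40evf_aq_str(hw, hw->aq.asq_last_status));
        }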
 /**
  * i40evf_debug_aq
  * @hw: debug mask related to admin queue
index 931c880443003d7aa3a1787ddd1b5f27241236cd..00ed24bfce1347f0b80b077139a754e58c6da376 100644 (file)
@@ -62,6 +62,7 @@ struct i40e_hmc_bp {
 struct i40e_hmc_pd_entry {
        struct i40e_hmc_bp bp;
        u32 sd_index;
+       bool rsrc_pg;
        bool valid;
 };
 
@@ -126,8 +127,8 @@ struct i40e_hmc_info {
                 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |              \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
-               (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+               BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);              \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +147,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
+       val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT);     \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -218,7 +219,8 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
 
 i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
                                              struct i40e_hmc_info *hmc_info,
-                                             u32 pd_index);
+                                             u32 pd_index,
+                                             struct i40e_dma_mem *rsrc_pg);
 i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
                                        struct i40e_hmc_info *hmc_info,
                                        u32 idx);
index 58e37a44b80a10233f00d004ee8fb9f5d496c12e..856eb9d06595eb7eff8ef66df8df0eb137751808 100644 (file)
@@ -60,6 +60,8 @@ void i40e_idle_aq(struct i40e_hw *hw);
 void i40evf_resume_aq(struct i40e_hw *hw);
 bool i40evf_check_asq_alive(struct i40e_hw *hw);
 i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
+char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err);
 
 i40e_status i40e_set_mac_type(struct i40e_hw *hw);
 
index 395f32f226c08ac924e7d3e707ef7124b2744ec5..60f88e4ad065ebdfe609e5e94cdce94e5a410d3c 100644 (file)
@@ -404,7 +404,7 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
         *  20-1249MB/s bulk   (8000 ints/s)
         */
        bytes_per_int = rc->total_bytes / rc->itr;
-       switch (rc->itr) {
+       switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
@@ -417,9 +417,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        case I40E_BULK_LATENCY:
                if (bytes_per_int <= 20)
-                       rc->latency_range = I40E_LOW_LATENCY;
+                       new_latency_range = I40E_LOW_LATENCY;
+               break;
+       default:
+               if (bytes_per_int <= 20)
+                       new_latency_range = I40E_LOW_LATENCY;
                break;
        }
+       rc->latency_range = new_latency_range;
 
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
@@ -435,42 +440,14 @@ static void i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
                break;
        }
 
-       if (new_itr != rc->itr) {
-               /* do an exponential smoothing */
-               new_itr = (10 * new_itr * rc->itr) /
-                         ((9 * new_itr) + rc->itr);
-               rc->itr = new_itr & I40E_MAX_ITR;
-       }
+       if (new_itr != rc->itr)
+               rc->itr = new_itr;
 
        rc->total_bytes = 0;
        rc->total_packets = 0;
 }
 
-/**
- * i40e_update_dynamic_itr - Adjust ITR based on bytes per int
- * @q_vector: the vector to adjust
- **/
-static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
-{
-       u16 vector = q_vector->vsi->base_vector + q_vector->v_idx;
-       struct i40e_hw *hw = &q_vector->vsi->back->hw;
-       u32 reg_addr;
-       u16 old_itr;
-
-       reg_addr = I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1);
-       old_itr = q_vector->rx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->rx);
-       if (old_itr != q_vector->rx.itr)
-               wr32(hw, reg_addr, q_vector->rx.itr);
-
-       reg_addr = I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1);
-       old_itr = q_vector->tx.itr;
-       i40e_set_new_dynamic_itr(&q_vector->tx);
-       if (old_itr != q_vector->tx.itr)
-               wr32(hw, reg_addr, q_vector->tx.itr);
-}
-
-/**
+/*
  * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
  * @tx_ring: the tx ring to set up
  *
@@ -873,7 +850,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                return;
 
        /* did the hardware decode the packet and checksum? */
-       if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+       if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;
 
        /* both known and outer_ip must be set for the below code to work */
@@ -888,25 +865,25 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                ipv6 = true;
 
        if (ipv4 &&
-           (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
-                        (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+           (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
+                        BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
                goto checksum_fail;
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+           rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
 
        /* there was some L4 error, count error and punt packet to the stack */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;
 
        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
-       if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
+       if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
        /* If VXLAN traffic has an outer UDPv4 checksum we need to check
@@ -1027,7 +1004,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1063,8 +1040,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_hbo = rx_error & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1116,7 +1093,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        struct i40e_rx_buffer *next_buffer;
 
                        next_buffer = &rx_ring->rx_bi[i];
@@ -1126,7 +1103,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        continue;
                }
@@ -1141,7 +1118,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
 #ifdef I40E_FCOE
@@ -1202,7 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                        I40E_RXD_QW1_STATUS_SHIFT;
 
-               if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+               if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -1220,7 +1197,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                           I40E_RXD_QW1_ERROR_SHIFT;
-               rx_error &= ~(1 << I40E_RX_DESC_ERROR_HBO_SHIFT);
+               rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
 
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;
@@ -1238,13 +1215,13 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                I40E_RX_INCREMENT(rx_ring, i);
 
                if (unlikely(
-                   !(rx_status & (1 << I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
+                   !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
                        rx_ring->rx_stats.non_eop_descs++;
                        continue;
                }
 
                /* ERR_MASK will only have valid bits if EOP set */
-               if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+               if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
                        dev_kfree_skb_any(skb);
                        /* TODO: shouldn't we increment a counter indicating the
                         * drop?
@@ -1262,7 +1239,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 
                i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
 
-               vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
+               vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
                i40e_receive_skb(rx_ring, skb, vlan_tag);
@@ -1280,6 +1257,67 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
        return total_rx_packets;
 }
 
+/**
+ * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
+ * @vsi: the VSI we care about
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ **/
+static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
+                                         struct i40e_q_vector *q_vector)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       u16 old_itr;
+       int vector;
+       u32 val;
+
+       vector = (q_vector->v_idx + vsi->base_vector);
+       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) {
+               old_itr = q_vector->rx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->rx);
+               if (old_itr != q_vector->rx.itr) {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                       (I40E_RX_ITR <<
+                               I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                       (q_vector->rx.itr <<
+                               I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+               } else {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                       I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                       (I40E_ITR_NONE <<
+                               I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+       } else {
+               i40evf_irq_enable_queues(vsi->back, 1
+                       << q_vector->v_idx);
+       }
+       if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) {
+               old_itr = q_vector->tx.itr;
+               i40e_set_new_dynamic_itr(&q_vector->tx);
+               if (old_itr != q_vector->tx.itr) {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_TX_ITR <<
+                                  I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT) |
+                               (q_vector->tx.itr <<
+                                  I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT);
+
+               } else {
+                       val = I40E_VFINT_DYN_CTLN_INTENA_MASK |
+                               I40E_VFINT_DYN_CTLN_CLEARPBA_MASK |
+                               (I40E_ITR_NONE <<
+                                  I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT);
+               }
+               if (!test_bit(__I40E_DOWN, &vsi->state))
+                       wr32(hw, I40E_VFINT_DYN_CTLN1(vector - 1), val);
+       } else {
+               i40evf_irq_enable_queues(vsi->back, BIT(q_vector->v_idx));
+       }
+}
+
 /**
  * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
  * @napi: napi struct with our devices info in it
@@ -1336,13 +1374,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 
        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete(napi);
-       if (ITR_IS_DYNAMIC(vsi->rx_itr_setting) ||
-           ITR_IS_DYNAMIC(vsi->tx_itr_setting))
-               i40e_update_dynamic_itr(q_vector);
-
-       if (!test_bit(__I40E_DOWN, &vsi->state))
-               i40evf_irq_enable_queues(vsi->back, 1 << q_vector->v_idx);
-
+       i40e_update_enable_itr(vsi, q_vector);
        return 0;
 }
 
@@ -1841,6 +1873,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
            netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
                                                   tx_ring->queue_index)))
                writel(i, tx_ring->tail);
+       else
+               prefetchw(tx_desc + 1);
 
        return;
 
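The ITR rework in this file drops i40e_update_dynamic_itr() and splits its job between i40e_set_new_dynamic_itr(), whose switch now keys off new_latency_range and stores the result in rc->latency_range, and the new i40e_update_enable_itr(), which programs I40E_VFINT_DYN_CTLN1 when the interval changes. The sketch below models the latency-range transitions; the LOW-latency thresholds are not visible in these hunks and are assumed from the surrounding driver, and the demo_* names are illustrative:

        /* Simplified model of the latency-range state machine driven by
         * bytes per interrupt; not the driver's exact arithmetic.
         */
        enum demo_latency { DEMO_LOWEST, DEMO_LOW, DEMO_BULK };

        static enum demo_latency demo_next_range(enum demo_latency cur,
                                                 unsigned int bytes_per_int)
        {
                switch (cur) {
                case DEMO_LOWEST:
                        return bytes_per_int > 10 ? DEMO_LOW : DEMO_LOWEST;
                case DEMO_LOW:  /* thresholds assumed, not shown above */
                        if (bytes_per_int > 20)
                                return DEMO_BULK;
                        return bytes_per_int <= 10 ? DEMO_LOWEST : DEMO_LOW;
                case DEMO_BULK:
                default:
                        return bytes_per_int <= 20 ? DEMO_LOW : DEMO_BULK;
                }
        }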
index e7a34f899f2cbb8150495a31e0690a95e90efc1a..6b47c818d1f08c11b81fd5e21b7ba9b162bdadd9 100644 (file)
@@ -66,17 +66,17 @@ enum i40e_dyn_idx_t {
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD))
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+       BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
 
 /* Supported Rx Buffer Sizes */
 #define I40E_RXBUFFER_512   512    /* Used for packet split */
@@ -129,16 +129,16 @@ enum i40e_dyn_idx_t {
 #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 #define I40E_MIN_DESC_PENDING  4
 
-#define I40E_TX_FLAGS_CSUM             (u32)(1)
-#define I40E_TX_FLAGS_HW_VLAN          (u32)(1 << 1)
-#define I40E_TX_FLAGS_SW_VLAN          (u32)(1 << 2)
-#define I40E_TX_FLAGS_TSO              (u32)(1 << 3)
-#define I40E_TX_FLAGS_IPV4             (u32)(1 << 4)
-#define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
-#define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
-#define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
-#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
-#define I40E_TX_FLAGS_VXLAN_TUNNEL     (u32)(1 << 10)
+#define I40E_TX_FLAGS_CSUM             BIT(0)
+#define I40E_TX_FLAGS_HW_VLAN          BIT(1)
+#define I40E_TX_FLAGS_SW_VLAN          BIT(2)
+#define I40E_TX_FLAGS_TSO              BIT(3)
+#define I40E_TX_FLAGS_IPV4             BIT(4)
+#define I40E_TX_FLAGS_IPV6             BIT(5)
+#define I40E_TX_FLAGS_FCCRC            BIT(6)
+#define I40E_TX_FLAGS_FSO              BIT(7)
+#define I40E_TX_FLAGS_FD_SB            BIT(9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL     BIT(10)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
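I40E_DEFAULT_RSS_HENA above is a 64-bit mask and several I40E_FILTER_PCTYPE_* bit positions lie above bit 31, which is why the conversion uses BIT_ULL() rather than BIT(). A small stand-alone illustration of the width issue; the pctype value used here is an assumption for the example, not taken from this patch:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned int pctype = 46;        /* assumed example; > 31 */
                uint64_t mask = 1ULL << pctype;  /* what BIT_ULL() produces */

                /* A plain (1 << pctype) would shift a 32-bit int by 32 or
                 * more bits, which is undefined behaviour; the (u64)1 and
                 * BIT_ULL() forms avoid exactly that.
                 */
                printf("bit %u -> 0x%llx\n", pctype,
                       (unsigned long long)mask);
                return 0;
        }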
index c463ec41579c708ffbe606ea0b31ca0485ea4c58..4ba9a012dcbac1bdbc87366df00f9d85c4e207f0 100644 (file)
@@ -213,7 +213,17 @@ struct i40e_hw_capabilities {
        bool dcb;
        bool fcoe;
        bool iscsi; /* Indicates iSCSI enabled */
-       bool mfp_mode_1;
+       bool flex10_enable;
+       bool flex10_capable;
+       u32  flex10_mode;
+#define I40E_FLEX10_MODE_UNKNOWN       0x0
+#define I40E_FLEX10_MODE_DCC           0x1
+#define I40E_FLEX10_MODE_DCI           0x2
+
+       u32 flex10_status;
+#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
+#define I40E_FLEX10_STATUS_VC_MODE     0x2
+
        bool mgmt_cem;
        bool ieee_1588;
        bool iwarp;
@@ -481,6 +491,7 @@ struct i40e_hw {
 
        /* debug mask */
        u32 debug_mask;
+       char err_str[16];
 };
 
 static inline bool i40e_is_vf(struct i40e_hw *hw)
@@ -594,7 +605,7 @@ enum i40e_rx_desc_status_bits {
 };
 
 #define I40E_RXD_QW1_STATUS_SHIFT      0
-#define I40E_RXD_QW1_STATUS_MASK       (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+#define I40E_RXD_QW1_STATUS_MASK       ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \
                                         << I40E_RXD_QW1_STATUS_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT   I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
@@ -602,8 +613,8 @@ enum i40e_rx_desc_status_bits {
                                             I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
 
 #define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT  I40E_RX_DESC_STATUS_TSYNVALID_SHIFT
-#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK     (0x1UL << \
-                                        I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
+#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \
+                                   BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT)
 
 enum i40e_rx_desc_fltstat_values {
        I40E_RX_DESC_FLTSTAT_NO_DATA    = 0,
@@ -737,8 +748,7 @@ enum i40e_rx_ptype_payload_layer {
                                         I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
-#define I40E_RXD_QW1_LENGTH_SPH_MASK   (0x1ULL << \
-                                        I40E_RXD_QW1_LENGTH_SPH_SHIFT)
+#define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
 
 enum i40e_rx_desc_ext_status_bits {
        /* Note: These are predefined bit offsets */
@@ -914,12 +924,12 @@ enum i40e_tx_ctx_desc_eipt_offload {
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
 #define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_UDP_TUNNELING     (0x1ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
+#define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK        (0x1ULL << \
-                                        I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
+#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
+                                      BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
 #define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
@@ -984,8 +994,8 @@ enum i40e_filter_program_desc_fd_status {
 };
 
 #define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT       23
-#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK        (0x1FFUL << \
-                                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
+#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK \
+                                      BIT_ULL(I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
 #define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
@@ -1003,8 +1013,7 @@ enum i40e_filter_program_desc_pcmd {
 #define I40E_TXD_FLTR_QW1_DEST_MASK    (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT        (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK (0x1ULL << \
-                                        I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
+#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT      (0x9ULL + \
                                                 I40E_TXD_FLTR_QW1_CMD_SHIFT)
@@ -1109,6 +1118,8 @@ struct i40e_hw_port_stats {
        u64 fd_atr_match;
        u64 fd_sb_match;
        u64 fd_atr_tunnel_match;
+       u32 fd_atr_status;
+       u32 fd_sb_status;
        /* EEE LPI */
        u32 tx_lpi_status;
        u32 rx_lpi_status;
index 59f62f0e65dd3ecaf230d2aa433452cc68bfa8d2..1e89dea0d52925e0c18bab22da4266594da02d36 100644 (file)
@@ -110,7 +110,9 @@ struct i40e_virtchnl_msg {
  * error regardless of version mismatch.
  */
 #define I40E_VIRTCHNL_VERSION_MAJOR            1
-#define I40E_VIRTCHNL_VERSION_MINOR            0
+#define I40E_VIRTCHNL_VERSION_MINOR            1
+#define I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS 0
+
 struct i40e_virtchnl_version_info {
        u32 major;
        u32 minor;
@@ -129,7 +131,8 @@ struct i40e_virtchnl_version_info {
  */
 
 /* I40E_VIRTCHNL_OP_GET_VF_RESOURCES
- * VF sends this request to PF with no parameters
+ * Version 1.0 VF sends this request to PF with no parameters
+ * Version 1.1 VF sends this request to PF with u32 bitmap of its capabilities
  * PF responds with an indirect message containing
  * i40e_virtchnl_vf_resource and one or more
  * i40e_virtchnl_vsi_resource structures.
@@ -143,9 +146,12 @@ struct i40e_virtchnl_vsi_resource {
        u8 default_mac_addr[ETH_ALEN];
 };
 /* VF offload flags */
-#define I40E_VIRTCHNL_VF_OFFLOAD_L2    0x00000001
-#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE  0x00000004
-#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN  0x00010000
+#define I40E_VIRTCHNL_VF_OFFLOAD_L2            0x00000001
+#define I40E_VIRTCHNL_VF_OFFLOAD_IWARP         0x00000002
+#define I40E_VIRTCHNL_VF_OFFLOAD_FCOE          0x00000004
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ                0x00000008
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG       0x00000010
+#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN          0x00010000
 
 struct i40e_virtchnl_vf_resource {
        u16 num_vsis;
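The updated comment above describes the protocol change for I40E_VIRTCHNL_OP_GET_VF_RESOURCES: a 1.1 VF now sends a u32 bitmap of the offloads it wants, while a 1.0 VF still sends no payload and the PF falls back to the legacy L2/RSS_REG/VLAN set (see i40e_vc_get_vf_resources_msg() earlier in this diff). The sketch below models the VF-side choice; the demo_* names and constants mirror the flags above but are illustrative only:

        #include <stddef.h>
        #include <stdint.h>

        #define DEMO_OFFLOAD_L2         0x00000001u
        #define DEMO_OFFLOAD_RSS_REG    0x00000010u
        #define DEMO_OFFLOAD_VLAN       0x00010000u

        /* A 1.1-capable PF gets a u32 capability bitmap as the request
         * payload; for a 1.0 PF the request carries no payload at all.
         */
        static uint32_t demo_build_resource_request(int pf_is_v11,
                                                    size_t *payload_len)
        {
                uint32_t caps = DEMO_OFFLOAD_L2 | DEMO_OFFLOAD_RSS_REG |
                                DEMO_OFFLOAD_VLAN;

                *payload_len = pf_is_v11 ? sizeof(caps) : 0;
                return caps;
        }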
index fea3b75a9a35fcdc58b9d5f5d0f6125dbf62e0cf..c33c7cce52fe2c5decf79e514a342275cb7f2f3a 100644 (file)
@@ -207,17 +207,17 @@ struct i40evf_adapter {
        struct msix_entry *msix_entries;
 
        u32 flags;
-#define I40EVF_FLAG_RX_CSUM_ENABLED              (u32)(1)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE              (u32)(1 << 1)
-#define I40EVF_FLAG_RX_PS_CAPABLE                (u32)(1 << 2)
-#define I40EVF_FLAG_RX_PS_ENABLED                (u32)(1 << 3)
-#define I40EVF_FLAG_IN_NETPOLL                   (u32)(1 << 4)
-#define I40EVF_FLAG_IMIR_ENABLED                 (u32)(1 << 5)
-#define I40EVF_FLAG_MQ_CAPABLE                   (u32)(1 << 6)
-#define I40EVF_FLAG_NEED_LINK_UPDATE             (u32)(1 << 7)
-#define I40EVF_FLAG_PF_COMMS_FAILED              (u32)(1 << 8)
-#define I40EVF_FLAG_RESET_PENDING                (u32)(1 << 9)
-#define I40EVF_FLAG_RESET_NEEDED                 (u32)(1 << 10)
+#define I40EVF_FLAG_RX_CSUM_ENABLED              BIT(0)
+#define I40EVF_FLAG_RX_1BUF_CAPABLE              BIT(1)
+#define I40EVF_FLAG_RX_PS_CAPABLE                BIT(2)
+#define I40EVF_FLAG_RX_PS_ENABLED                BIT(3)
+#define I40EVF_FLAG_IN_NETPOLL                   BIT(4)
+#define I40EVF_FLAG_IMIR_ENABLED                 BIT(5)
+#define I40EVF_FLAG_MQ_CAPABLE                   BIT(6)
+#define I40EVF_FLAG_NEED_LINK_UPDATE             BIT(7)
+#define I40EVF_FLAG_PF_COMMS_FAILED              BIT(8)
+#define I40EVF_FLAG_RESET_PENDING                BIT(9)
+#define I40EVF_FLAG_RESET_NEEDED                 BIT(10)
 /* duplicates for common code */
 #define I40E_FLAG_FDIR_ATR_ENABLED              0
 #define I40E_FLAG_DCB_ENABLED                   0
@@ -225,15 +225,16 @@ struct i40evf_adapter {
 #define I40E_FLAG_RX_CSUM_ENABLED                I40EVF_FLAG_RX_CSUM_ENABLED
        /* flags for admin queue service task */
        u32 aq_required;
-#define I40EVF_FLAG_AQ_ENABLE_QUEUES           (u32)(1)
-#define I40EVF_FLAG_AQ_DISABLE_QUEUES          (u32)(1 << 1)
-#define I40EVF_FLAG_AQ_ADD_MAC_FILTER          (u32)(1 << 2)
-#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER         (u32)(1 << 3)
-#define I40EVF_FLAG_AQ_DEL_MAC_FILTER          (u32)(1 << 4)
-#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER         (u32)(1 << 5)
-#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                (u32)(1 << 6)
-#define I40EVF_FLAG_AQ_MAP_VECTORS             (u32)(1 << 7)
-#define I40EVF_FLAG_AQ_HANDLE_RESET            (u32)(1 << 8)
+#define I40EVF_FLAG_AQ_ENABLE_QUEUES           BIT(0)
+#define I40EVF_FLAG_AQ_DISABLE_QUEUES          BIT(1)
+#define I40EVF_FLAG_AQ_ADD_MAC_FILTER          BIT(2)
+#define I40EVF_FLAG_AQ_ADD_VLAN_FILTER         BIT(3)
+#define I40EVF_FLAG_AQ_DEL_MAC_FILTER          BIT(4)
+#define I40EVF_FLAG_AQ_DEL_VLAN_FILTER         BIT(5)
+#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES                BIT(6)
+#define I40EVF_FLAG_AQ_MAP_VECTORS             BIT(7)
+#define I40EVF_FLAG_AQ_HANDLE_RESET            BIT(8)
+#define I40EVF_FLAG_AQ_GET_CONFIG              BIT(10)
 
        /* OS defined structs */
        struct net_device *netdev;
@@ -249,8 +250,17 @@ struct i40evf_adapter {
        bool netdev_registered;
        bool link_up;
        enum i40e_virtchnl_ops current_op;
+#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
+                           I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
+                   I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
+                         I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
        struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
        struct i40e_virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
+       struct i40e_virtchnl_version_info pf_version;
+#define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
+                      ((_a)->pf_version.minor == 1))
        u16 msg_enable;
        struct i40e_eth_stats current_stats;
        struct i40e_vsi vsi;
@@ -264,6 +274,7 @@ extern const char i40evf_driver_version[];
 
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
+int i40evf_process_config(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
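The aq_required bits above follow a set-and-drain pattern: code that needs an admin-queue request sets a flag, and the watchdog task (see the i40evf_main.c hunks below handling I40EVF_FLAG_AQ_GET_CONFIG) issues one request per pass. The sketch below is a loose model of that pattern rather than the driver's actual watchdog; all demo_* names are illustrative:

        #include <stdint.h>

        #define DEMO_AQ_GET_CONFIG      (1u << 10)
        #define DEMO_AQ_MAP_VECTORS     (1u << 7)

        /* Handling one request per pass keeps admin-queue traffic
         * serialized; in the real driver each bit is cleared once the
         * corresponding virtchnl message has been sent.
         */
        static void demo_watchdog_pass(uint32_t *aq_required)
        {
                if (*aq_required & DEMO_AQ_GET_CONFIG) {
                        /* send the get-config virtchnl request here */
                        *aq_required &= ~DEMO_AQ_GET_CONFIG;
                        return;
                }
                if (*aq_required & DEMO_AQ_MAP_VECTORS) {
                        /* send the IRQ map virtchnl request here */
                        *aq_required &= ~DEMO_AQ_MAP_VECTORS;
                        return;
                }
        }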
index 2b53c870e7f113ca0695afab3636446e1015e4e8..4790437a50ac0d3e7f94b2733acf8e7c50c3f18e 100644 (file)
@@ -381,11 +381,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
 
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V4_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
 
@@ -397,11 +397,11 @@ static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
                break;
 
        case TCP_V6_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V6_FLOW:
-               if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+               if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
 
@@ -479,10 +479,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -491,10 +491,10 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+                       hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
                        return -EINVAL;
@@ -503,12 +503,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
                default:
                        return -EINVAL;
@@ -517,12 +517,12 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
-                       hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+                       hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+                                BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
                default:
                        return -EINVAL;
@@ -535,7 +535,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
                break;
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -544,15 +544,15 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                if ((nfc->data & RXH_L4_B_0_1) ||
                    (nfc->data & RXH_L4_B_2_3))
                        return -EINVAL;
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+               hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
                break;
        case IPV4_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                break;
        case IPV6_FLOW:
-               hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
-                       ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+               hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+                        BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                break;
        default:
                return -EINVAL;
index 4ab4ebba07a18e5b1b0539cf0c0b8a7122f6fdc2..1503cad918d88d42a559ecf4af15692751e3ba13 100644 (file)
@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
        "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "1.2.25"
+#define DRV_VERSION "1.3.2"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -240,7 +240,7 @@ void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
        int i;
 
        for (i = 1; i < adapter->num_msix_vectors; i++) {
-               if (mask & (1 << (i - 1))) {
+               if (mask & BIT(i - 1)) {
                        wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
                             I40E_VFINT_DYN_CTLN1_INTENA_MASK |
                             I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -268,7 +268,7 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
                wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
        }
        for (i = 1; i < adapter->num_msix_vectors; i++) {
-               if (mask & (1 << i)) {
+               if (mask & BIT(i)) {
                        dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
                        dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
                                   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
@@ -377,7 +377,7 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
        q_vector->tx.count++;
        q_vector->tx.latency_range = I40E_LOW_LATENCY;
        q_vector->num_ringpairs++;
-       q_vector->ring_mask |= (1 << t_idx);
+       q_vector->ring_mask |= BIT(t_idx);
 }
 
 /**
@@ -406,7 +406,7 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
        /* The ideal configuration...
         * We have enough vectors to map one per queue.
         */
-       if (q_vectors == (rxr_remaining * 2)) {
+       if (q_vectors >= (rxr_remaining * 2)) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -892,8 +892,10 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
                                        break;
                                }
                        }
+                       if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
+                               found = true;
                }
-               if (found) {
+               if (!found) {
                        f->remove = true;
                        adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
                }
@@ -1369,6 +1371,10 @@ static void i40evf_watchdog_task(struct work_struct *work)
                }
                goto watchdog_done;
        }
+       if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
+               i40evf_send_vf_config_msg(adapter);
+               goto watchdog_done;
+       }
 
        if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
                i40evf_disable_queues(adapter);
@@ -1604,7 +1610,8 @@ continue_reset:
                dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
                         err);
 
-       i40evf_map_queues(adapter);
+       adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
+       adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 
        /* re-add all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
@@ -1614,7 +1621,7 @@ continue_reset:
        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                f->add = true;
        }
-       adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+       adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
        i40evf_misc_irq_enable(adapter);
@@ -1856,6 +1863,7 @@ static int i40evf_open(struct net_device *netdev)
        if (err)
                goto err_req_irq;
 
+       i40evf_add_filter(adapter, adapter->hw.mac.addr);
        i40evf_configure(adapter);
 
        err = i40evf_up_complete(adapter);
@@ -1978,6 +1986,62 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
        return -EBUSY;
 }
 
+/**
+ * i40evf_process_config - Process the config information we got from the PF
+ * @adapter: board private structure
+ *
+ * Verify that we have a valid config struct, and set up our netdev features
+ * and our VSI struct.
+ **/
+int i40evf_process_config(struct i40evf_adapter *adapter)
+{
+       struct net_device *netdev = adapter->netdev;
+       int i;
+
+       /* got VF config message back from PF, now we can parse it */
+       for (i = 0; i < adapter->vf_res->num_vsis; i++) {
+               if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+                       adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+       }
+       if (!adapter->vsi_res) {
+               dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
+               return -ENODEV;
+       }
+
+       if (adapter->vf_res->vf_offload_flags
+           & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
+               netdev->vlan_features = netdev->features;
+               netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
+                                   NETIF_F_HW_VLAN_CTAG_RX |
+                                   NETIF_F_HW_VLAN_CTAG_FILTER;
+       }
+       netdev->features |= NETIF_F_HIGHDMA |
+                           NETIF_F_SG |
+                           NETIF_F_IP_CSUM |
+                           NETIF_F_SCTP_CSUM |
+                           NETIF_F_IPV6_CSUM |
+                           NETIF_F_TSO |
+                           NETIF_F_TSO6 |
+                           NETIF_F_RXCSUM |
+                           NETIF_F_GRO;
+
+       /* copy netdev features into list of user selectable features */
+       netdev->hw_features |= netdev->features;
+       netdev->hw_features &= ~NETIF_F_RXCSUM;
+
+       adapter->vsi.id = adapter->vsi_res->vsi_id;
+
+       adapter->vsi.back = adapter;
+       adapter->vsi.base_vector = 1;
+       adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
+       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+       adapter->vsi.netdev = adapter->netdev;
+       return 0;
+}
+
 /**
  * i40evf_init_task - worker thread to perform delayed initialization
  * @work: pointer to work_struct containing our data
@@ -1996,10 +2060,9 @@ static void i40evf_init_task(struct work_struct *work)
                                                      struct i40evf_adapter,
                                                      init_task.work);
        struct net_device *netdev = adapter->netdev;
-       struct i40evf_mac_filter *f;
        struct i40e_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       int i, err, bufsz;
+       int err, bufsz;
 
        switch (adapter->state) {
        case __I40EVF_STARTUP:
@@ -2050,6 +2113,12 @@ static void i40evf_init_task(struct work_struct *work)
                if (err) {
                        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                                err = i40evf_send_api_ver(adapter);
+                       else
+                               dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
+                                       adapter->pf_version.major,
+                                       adapter->pf_version.minor,
+                                       I40E_VIRTCHNL_VERSION_MAJOR,
+                                       I40E_VIRTCHNL_VERSION_MINOR);
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
@@ -2085,42 +2154,15 @@ static void i40evf_init_task(struct work_struct *work)
        default:
                goto err_alloc;
        }
-       /* got VF config message back from PF, now we can parse it */
-       for (i = 0; i < adapter->vf_res->num_vsis; i++) {
-               if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
-                       adapter->vsi_res = &adapter->vf_res->vsi_res[i];
-       }
-       if (!adapter->vsi_res) {
-               dev_err(&pdev->dev, "No LAN VSI found\n");
+       if (i40evf_process_config(adapter))
                goto err_alloc;
-       }
+       adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
        adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
        netdev->netdev_ops = &i40evf_netdev_ops;
        i40evf_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
-       netdev->features |= NETIF_F_HIGHDMA |
-                           NETIF_F_SG |
-                           NETIF_F_IP_CSUM |
-                           NETIF_F_SCTP_CSUM |
-                           NETIF_F_IPV6_CSUM |
-                           NETIF_F_TSO |
-                           NETIF_F_TSO6 |
-                           NETIF_F_RXCSUM |
-                           NETIF_F_GRO;
-
-       if (adapter->vf_res->vf_offload_flags
-           & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
-               netdev->vlan_features = netdev->features;
-               netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
-                                   NETIF_F_HW_VLAN_CTAG_RX |
-                                   NETIF_F_HW_VLAN_CTAG_FILTER;
-       }
-
-       /* copy netdev features into list of user selectable features */
-       netdev->hw_features |= netdev->features;
-       netdev->hw_features &= ~NETIF_F_RXCSUM;
 
        if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
                dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -2130,16 +2172,6 @@ static void i40evf_init_task(struct work_struct *work)
        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
        ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
-       f = kzalloc(sizeof(*f), GFP_ATOMIC);
-       if (!f)
-               goto err_sw_init;
-
-       ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
-       f->add = true;
-       adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
-       list_add(&f->list, &adapter->mac_filter_list);
-
        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &i40evf_watchdog_timer;
        adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -2161,17 +2193,6 @@ static void i40evf_init_task(struct work_struct *work)
 
        netif_carrier_off(netdev);
 
-       adapter->vsi.id = adapter->vsi_res->vsi_id;
-       adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
-       adapter->vsi.back = adapter;
-       adapter->vsi.base_vector = 1;
-       adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
-       adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
-       adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
-                                      ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
-       adapter->vsi.netdev = adapter->netdev;
-
        if (!adapter->netdev_registered) {
                err = register_netdev(netdev);
                if (err)
@@ -2299,7 +2320,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw = &adapter->hw;
        hw->back = adapter;
 
-       adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+       adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
        adapter->state = __I40EVF_STARTUP;
 
        /* Call save state here because it relies on the adapter struct. */
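The msg_enable and ring_mask changes in this file swap open-coded shifts for the BIT() helper. As a minimal illustration only (BIT() is defined locally here in the same shape as the kernel macro; names and values are made up for the example), BIT(n) marks a single bit and BIT(n) - 1 builds a mask of the n low bits:

        #include <stdio.h>

        #define BIT(n) (1UL << (n))     /* local stand-in for the kernel helper */

        int main(void)
        {
                unsigned long ring_mask = 0;
                int t_idx = 3;

                ring_mask |= BIT(t_idx);                    /* mark ring index 3 */
                printf("ring_mask  = 0x%lx\n", ring_mask);  /* 0x8 */
                printf("msg_enable = 0x%lx\n", BIT(3) - 1); /* 0x7, three low bits */
                return 0;
        }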
index 61e090558f31334588ca06a4860099c5428becc9..d4eb1a5e7d42c4562a659202685d0e8331d384c9 100644 (file)
@@ -51,8 +51,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
 
        err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
        if (err)
-               dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
-                       op, err, hw->aq.asq_last_status);
+               dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n",
+                       op, i40evf_stat_str(hw, err),
+                       i40evf_aq_str(hw, hw->aq.asq_last_status));
        return err;
 }
 
@@ -125,8 +126,11 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
        }
 
        pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
-       if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
-           (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
+       adapter->pf_version = *pf_vvi;
+
+       if ((pf_vvi->major > I40E_VIRTCHNL_VERSION_MAJOR) ||
+           ((pf_vvi->major == I40E_VIRTCHNL_VERSION_MAJOR) &&
+            (pf_vvi->minor > I40E_VIRTCHNL_VERSION_MINOR)))
                err = -EIO;
 
 out_alloc:
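The relaxed check above only treats the PF as incompatible when it reports a newer virtchnl version than the driver itself supports; an equal or older PF is accepted. A small standalone sketch of that predicate (struct and values are illustrative, not the driver's types):

        #include <stdbool.h>
        #include <stdio.h>

        struct version_info { int major, minor; };

        /* Incompatible only if the PF version is strictly newer than ours. */
        static bool pf_version_ok(struct version_info pf, struct version_info drv)
        {
                if (pf.major > drv.major)
                        return false;
                if (pf.major == drv.major && pf.minor > drv.minor)
                        return false;
                return true;
        }

        int main(void)
        {
                struct version_info drv = { 1, 1 };

                printf("%d %d %d\n",
                       pf_version_ok((struct version_info){ 1, 0 }, drv),  /* 1 */
                       pf_version_ok((struct version_info){ 1, 1 }, drv),  /* 1 */
                       pf_version_ok((struct version_info){ 1, 2 }, drv)); /* 0 */
                return 0;
        }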
@@ -145,8 +149,24 @@ out:
  **/
 int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
 {
-       return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
-                                 NULL, 0);
+       u32 caps;
+
+       adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+       caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+              I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+              I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+       adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
+       if (PF_IS_V11(adapter))
+               return i40evf_send_pf_msg(adapter,
+                                         I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+                                         (u8 *)&caps, sizeof(caps));
+       else
+               return i40evf_send_pf_msg(adapter,
+                                         I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+                                         NULL, 0);
 }
 
 /**
@@ -274,7 +294,7 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
        }
        adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
-       vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+       vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
@@ -299,7 +319,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
        }
        adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
        vqs.vsi_id = adapter->vsi_res->vsi_id;
-       vqs.tx_queues = (1 << adapter->num_active_queues) - 1;
+       vqs.tx_queues = BIT(adapter->num_active_queues) - 1;
        vqs.rx_queues = vqs.tx_queues;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
@@ -708,8 +728,9 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                return;
        }
        if (v_retval) {
-               dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
-                       __func__, v_retval, v_opcode);
+               dev_err(&adapter->pdev->dev, "%s: PF returned error %d (%s) to our request %d\n",
+                       __func__, v_retval,
+                       i40evf_stat_str(&adapter->hw, v_retval), v_opcode);
        }
        switch (v_opcode) {
        case I40E_VIRTCHNL_OP_GET_STATS: {
@@ -729,6 +750,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                adapter->current_stats = *stats;
                }
                break;
+       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: {
+               u16 len = sizeof(struct i40e_virtchnl_vf_resource) +
+                         I40E_MAX_VF_VSI *
+                         sizeof(struct i40e_virtchnl_vsi_resource);
+               memcpy(adapter->vf_res, msg, min(msglen, len));
+               i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res);
+               i40evf_process_config(adapter);
+               }
+               break;
        case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
                /* enable transmits */
                i40evf_irq_enable(adapter, true);
@@ -740,7 +770,6 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                i40evf_free_all_rx_resources(adapter);
                break;
        case I40E_VIRTCHNL_OP_VERSION:
-       case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
                /* Don't display an error if we get these out of sequence.
                 * If the firmware needed to get kicked, we'll get these and
index b0182dd313464ccceb85dd19c9489fcd7b3cd9c6..d19256994e5cfefce6793dbe58b953d9c79a0504 100644 (file)
@@ -139,10 +139,6 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
        if (ret_val)
                return ret_val;
 
-       /* reset page to 0 */
-       ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
-       if (ret_val)
-               return ret_val;
 
        if (data & E1000_M88E1112_STATUS_LINK)
                port = E1000_MEDIA_PORT_OTHER;
@@ -151,8 +147,20 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
        if (port && (hw->dev_spec._82575.media_port != port)) {
                hw->dev_spec._82575.media_port = port;
                hw->dev_spec._82575.media_changed = true;
+       }
+
+       if (port == E1000_MEDIA_PORT_COPPER) {
+               /* reset page to 0 */
+               ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+               if (ret_val)
+                       return ret_val;
+               igb_check_for_link_82575(hw);
        } else {
-               ret_val = igb_check_for_link_82575(hw);
+               igb_check_for_link_82575(hw);
+               /* reset page to 0 */
+               ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+               if (ret_val)
+                       return ret_val;
        }
 
        return 0;
index c1bb64d8366fa5e7741905ea9fc22d4b241054ce..987c9de247645a2d0ec1992d65703bf22a0daeb2 100644 (file)
@@ -1,5 +1,5 @@
 /* Intel(R) Gigabit Ethernet Linux driver
- * Copyright(c) 2007-2014 Intel Corporation.
+ * Copyright(c) 2007-2015 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -36,9 +36,6 @@ static s32  igb_set_master_slave_mode(struct e1000_hw *hw);
 /* Cable length tables */
 static const u16 e1000_m88_cable_length_table[] = {
        0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
-#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
-       (sizeof(e1000_m88_cable_length_table) / \
-       sizeof(e1000_m88_cable_length_table[0]))
 
 static const u16 e1000_igp_2_cable_length_table[] = {
        0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -49,9 +46,6 @@ static const u16 e1000_igp_2_cable_length_table[] = {
        60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
        83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
        104, 109, 114, 118, 121, 124};
-#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
-       (sizeof(e1000_igp_2_cable_length_table) / \
-        sizeof(e1000_igp_2_cable_length_table[0]))
 
 /**
  *  igb_check_reset_block - Check if PHY reset is blocked
@@ -1700,7 +1694,7 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
 
        index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-       if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+       if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
                ret_val = -E1000_ERR_PHY;
                goto out;
        }
@@ -1796,7 +1790,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
 
                index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
                        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
-               if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+               if (index >= ARRAY_SIZE(e1000_m88_cable_length_table) - 1) {
                        ret_val = -E1000_ERR_PHY;
                        goto out;
                }
@@ -1840,7 +1834,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
        s32 ret_val = 0;
        u16 phy_data, i, agc_value = 0;
        u16 cur_agc_index, max_agc_index = 0;
-       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+       u16 min_agc_index = ARRAY_SIZE(e1000_igp_2_cable_length_table) - 1;
        static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
                IGP02E1000_PHY_AGC_A,
                IGP02E1000_PHY_AGC_B,
@@ -1863,7 +1857,7 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
                                IGP02E1000_AGC_LENGTH_MASK;
 
                /* Array index bound check. */
-               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+               if ((cur_agc_index >= ARRAY_SIZE(e1000_igp_2_cable_length_table)) ||
                    (cur_agc_index == 0)) {
                        ret_val = -E1000_ERR_PHY;
                        goto out;
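The hand-rolled *_CABLE_LENGTH_TABLE_SIZE macros give way to ARRAY_SIZE() above. A self-contained sketch of the same bound check (ARRAY_SIZE defined locally in its simplest form; table contents shortened for illustration):

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        static const unsigned short cable_length_table[] = {
                0, 50, 80, 110, 140, 140, 0xFFFF };

        int main(void)
        {
                unsigned int index = 9;

                /* Same style of bound check as igb_get_cable_length_m88() */
                if (index >= ARRAY_SIZE(cable_length_table) - 1)
                        printf("index %u out of range (%zu entries)\n",
                               index, ARRAY_SIZE(cable_length_table));
                return 0;
        }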
index d5673eb90c542c75f3ae4903997afc736a7d7956..b7b9c670bb3c7e24db5ff9d8e3437af06c27d317 100644 (file)
@@ -2159,6 +2159,27 @@ static int igb_set_coalesce(struct net_device *netdev,
        struct igb_adapter *adapter = netdev_priv(netdev);
        int i;
 
+       if (ec->rx_max_coalesced_frames ||
+           ec->rx_coalesce_usecs_irq ||
+           ec->rx_max_coalesced_frames_irq ||
+           ec->tx_max_coalesced_frames ||
+           ec->tx_coalesce_usecs_irq ||
+           ec->stats_block_coalesce_usecs ||
+           ec->use_adaptive_rx_coalesce ||
+           ec->use_adaptive_tx_coalesce ||
+           ec->pkt_rate_low ||
+           ec->rx_coalesce_usecs_low ||
+           ec->rx_max_coalesced_frames_low ||
+           ec->tx_coalesce_usecs_low ||
+           ec->tx_max_coalesced_frames_low ||
+           ec->pkt_rate_high ||
+           ec->rx_coalesce_usecs_high ||
+           ec->rx_max_coalesced_frames_high ||
+           ec->tx_coalesce_usecs_high ||
+           ec->tx_max_coalesced_frames_high ||
+           ec->rate_sample_interval)
+               return -ENOTSUPP;
+
        if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
            ((ec->rx_coalesce_usecs > 3) &&
             (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
@@ -2396,10 +2417,6 @@ static int igb_get_ts_info(struct net_device *dev,
                        info->rx_filters |=
                                (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                                (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                                (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
 
                return 0;
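Earlier in this file, igb_set_coalesce() now rejects any request that sets coalescing fields the hardware does not implement. A reduced sketch of that guard (struct trimmed to a few invented fields; the sketch returns -EOPNOTSUPP because the kernel-internal -ENOTSUPP value is not available to userspace):

        #include <errno.h>
        #include <stdio.h>

        struct coalesce_req {
                unsigned int rx_usecs;       /* supported knob */
                unsigned int rx_max_frames;  /* not implemented by this hw */
                unsigned int adaptive_rx;    /* not implemented by this hw */
        };

        static int set_coalesce(const struct coalesce_req *ec)
        {
                /* Reject early if any unsupported field is non-zero. */
                if (ec->rx_max_frames || ec->adaptive_rx)
                        return -EOPNOTSUPP;
                return 0;  /* would go on to program rx_usecs */
        }

        int main(void)
        {
                struct coalesce_req ok  = { .rx_usecs = 3 };
                struct coalesce_req bad = { .rx_usecs = 3, .adaptive_rx = 1 };

                printf("%d %d\n", set_coalesce(&ok), set_coalesce(&bad));
                return 0;
        }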
index 2f70a9b152bd1789349d9c4d995852e95be1e70d..41e27404689648a4bad220e174db32cdf1077580 100644 (file)
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 2
-#define BUILD 18
+#define MIN 3
+#define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -6621,22 +6621,25 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                            struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = IGB_RX_BUFSZ;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
+       unsigned int pull_len;
 
-       if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
-               if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-                       igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-                       va += IGB_TS_HDR_LEN;
-                       size -= IGB_TS_HDR_LEN;
-               }
+       if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
+               igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+               va += IGB_TS_HDR_LEN;
+               size -= IGB_TS_HDR_LEN;
+       }
 
+       if (likely(size <= IGB_RX_HDR_LEN)) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* page is not reserved, we can reuse buffer as-is */
@@ -6648,8 +6651,21 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                return false;
        }
 
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
+
+add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
        return igb_can_reuse_rx_page(rx_buffer, page, truesize);
 }
@@ -6790,62 +6806,6 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
        return true;
 }
 
-/**
- *  igb_pull_tail - igb specific version of skb_pull_tail
- *  @rx_ring: rx descriptor ring packet is being transacted on
- *  @rx_desc: pointer to the EOP Rx descriptor
- *  @skb: pointer to current skb being adjusted
- *
- *  This function is an igb specific version of __pskb_pull_tail.  The
- *  main difference between this version and the original function is that
- *  this function can make several assumptions about the state of things
- *  that allow for significant optimizations versus the standard function.
- *  As a result we can do things like drop a frag and maintain an accurate
- *  truesize for the skb.
- */
-static void igb_pull_tail(struct igb_ring *rx_ring,
-                         union e1000_adv_rx_desc *rx_desc,
-                         struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               /* retrieve timestamp from buffer */
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
-
-               /* update pointers to remove timestamp header */
-               skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
-               frag->page_offset += IGB_TS_HDR_LEN;
-               skb->data_len -= IGB_TS_HDR_LEN;
-               skb->len -= IGB_TS_HDR_LEN;
-
-               /* move va to start of packet data */
-               va += IGB_TS_HDR_LEN;
-       }
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, IGB_RX_HDR_LEN);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  *  igb_cleanup_headers - Correct corrupted or empty headers
  *  @rx_ring: rx descriptor ring packet is being transacted on
@@ -6873,10 +6833,6 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
                }
        }
 
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               igb_pull_tail(rx_ring, rx_desc, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
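With the hunks above, igb_pull_tail() is gone because igb_add_rx_frag() itself copies the packet headers into the skb's linear area and attaches only the remaining bytes as a page fragment. A loose sketch of that split, with buffer handling reduced to memcpy and offset arithmetic (sizes and names are illustrative only):

        #include <stdio.h>
        #include <string.h>

        #define RX_HDR_LEN 256  /* stand-in for the driver's header budget */

        /* Copy up to RX_HDR_LEN bytes of headers to the linear area; the rest
         * of the buffer would be attached as a tail fragment. */
        static size_t split_headers(const unsigned char *va, size_t size,
                                    unsigned char *linear, size_t *frag_off)
        {
                size_t pull_len = size < RX_HDR_LEN ? size : RX_HDR_LEN;

                memcpy(linear, va, pull_len);
                *frag_off = pull_len;
                return pull_len;
        }

        int main(void)
        {
                unsigned char pkt[1500] = { 0 }, linear[RX_HDR_LEN];
                size_t frag_off;
                size_t pulled = split_headers(pkt, sizeof(pkt), linear, &frag_off);

                printf("pulled %zu bytes, fragment starts at offset %zu\n",
                       pulled, frag_off);
                return 0;
        }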
index 6b87d963461462cc92885aa057b19adbb18125c0..b1e364d26aa73f7e2bbbab3a46f5eaaa01e2fd38 100644 (file)
@@ -1394,14 +1394,12 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
        /*
         * Continue setup of fdirctrl register bits:
         *  Turn perfect match filtering on
-        *  Report hash in RSS field of Rx wb descriptor
         *  Initialize the drop queue
         *  Move the flexible bytes to use the ethertype - shift 6 words
         *  Set the maximum length per hash bucket to 0xA filters
         *  Send interrupt when 64 (0x4 * 16) filters are left
         */
        fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
-                   IXGBE_FDIRCTRL_REPORT_STATUS |
                    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
                    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
                    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
index ec7b2324b77b42489d9892de6b785ab7ca9936cf..f7aeb560a504af040603b2c3ebfa939f5d85b285 100644 (file)
@@ -2938,14 +2938,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                        (1 << HWTSTAMP_FILTER_NONE) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-                       (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
                break;
        default:
index 9aa6104e34ea8e2bd93994e4d8291763014162b8..3e6a9319c7185b52a4571cbbab61aa9dd54c422c 100644 (file)
@@ -1360,14 +1360,31 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
 }
 
 #endif /* CONFIG_IXGBE_DCA */
+
+#define IXGBE_RSS_L4_TYPES_MASK \
+       ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
 static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
                                 union ixgbe_adv_rx_desc *rx_desc,
                                 struct sk_buff *skb)
 {
-       if (ring->netdev->features & NETIF_F_RXHASH)
-               skb_set_hash(skb,
-                            le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-                            PKT_HASH_TYPE_L3);
+       u16 rss_type;
+
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
+               return;
+
+       rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+                  IXGBE_RXDADV_RSSTYPE_MASK;
+
+       if (!rss_type)
+               return;
+
+       skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                    (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
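The new ixgbe_rx_hash() decides between an L3 and an L4 skb hash type by testing the reported RSS type against a bitmask of the L4-capable types. A small sketch using the RSS type codes from the IXGBE_RXDADV_RSSTYPE_* values shown below in this patch (the enum values are copied from those defines; everything else is local to the example):

        #include <stdio.h>

        enum { RSS_NONE = 0, RSS_IPV4_TCP = 1, RSS_IPV4 = 2, RSS_IPV6_TCP = 3,
               RSS_IPV4_UDP = 7, RSS_IPV6_UDP = 8 };

        #define RSS_L4_TYPES_MASK \
                ((1ul << RSS_IPV4_TCP) | (1ul << RSS_IPV4_UDP) | \
                 (1ul << RSS_IPV6_TCP) | (1ul << RSS_IPV6_UDP))

        int main(void)
        {
                for (unsigned int t = 0; t <= 8; t++)
                        printf("rss_type %u -> %s\n", t,
                               t == 0 ? "no hash" :
                               (RSS_L4_TYPES_MASK & (1ul << t)) ? "L4 hash"
                                                                : "L3 hash");
                return 0;
        }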
index 770e21a643880a9032cdc5b1e6460075acfd7250..58434584b16d9034f762ba85c71ab8251642e66e 100644 (file)
@@ -161,6 +161,18 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_RXDADV_SPLITHEADER_EN    0x00001000
 #define IXGBE_RXDADV_SPH               0x8000
 
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE              0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP          0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4              0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP          0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX           0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6              0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX       0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP          0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP          0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX       0x00000009
+
 #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
                                      IXGBE_RXD_ERR_CE |  \
                                      IXGBE_RXD_ERR_LE |  \
index b2f5b161d792a769bdb780154bfe5895f56ba218..d3e5f5b37999a359e1ab1d1f4468a78e9afa5527 100644 (file)
@@ -813,22 +813,15 @@ static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       /* We support this operation only for 82599 and x540 at the moment */
-       if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-               return IXGBEVF_82599_RETA_SIZE;
+       if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
+               return IXGBEVF_X550_VFRETA_SIZE;
 
-       return 0;
+       return IXGBEVF_82599_RETA_SIZE;
 }
 
 static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
 {
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
-       /* We support this operation only for 82599 and x540 at the moment */
-       if (adapter->hw.mac.type < ixgbe_mac_X550_vf)
-               return IXGBEVF_RSS_HASH_KEY_SIZE;
-
-       return 0;
+       return IXGBEVF_RSS_HASH_KEY_SIZE;
 }
 
 static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
@@ -840,21 +833,33 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
 
-       /* If neither indirection table nor hash key was requested - just
-        * return a success avoiding taking any locks.
-        */
-       if (!indir && !key)
-               return 0;
+       if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
+               if (key)
+                       memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));
 
-       spin_lock_bh(&adapter->mbx_lock);
-       if (indir)
-               err = ixgbevf_get_reta_locked(&adapter->hw, indir,
-                                             adapter->num_rx_queues);
+               if (indir) {
+                       int i;
 
-       if (!err && key)
-               err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+                       for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
+                               indir[i] = adapter->rss_indir_tbl[i];
+               }
+       } else {
+               /* If neither indirection table nor hash key was requested
+                * - just return success without taking any locks.
+                */
+               if (!indir && !key)
+                       return 0;
 
-       spin_unlock_bh(&adapter->mbx_lock);
+               spin_lock_bh(&adapter->mbx_lock);
+               if (indir)
+                       err = ixgbevf_get_reta_locked(&adapter->hw, indir,
+                                                     adapter->num_rx_queues);
+
+               if (!err && key)
+                       err = ixgbevf_get_rss_key_locked(&adapter->hw, key);
+
+               spin_unlock_bh(&adapter->mbx_lock);
+       }
 
        return err;
 }
index 775d089009499ddfcee4d97c3a55af73528138c5..04c7ec8446e0329c71eaee8eea346a4e92836b8e 100644 (file)
@@ -144,9 +144,11 @@ struct ixgbevf_ring {
 
 #define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
 #define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
-#define IXGBEVF_MAX_RSS_QUEUES 2
-#define IXGBEVF_82599_RETA_SIZE        128
+#define IXGBEVF_MAX_RSS_QUEUES         2
+#define IXGBEVF_82599_RETA_SIZE                128     /* 128 entries */
+#define IXGBEVF_X550_VFRETA_SIZE       64      /* 64 entries */
 #define IXGBEVF_RSS_HASH_KEY_SIZE      40
+#define IXGBEVF_VFRSSRK_REGS           10      /* 10 registers for RSS key */
 
 #define IXGBEVF_DEFAULT_TXD    1024
 #define IXGBEVF_DEFAULT_RXD    512
@@ -447,6 +449,9 @@ struct ixgbevf_adapter {
 
        spinlock_t mbx_lock;
        unsigned long last_reset;
+
+       u32 rss_key[IXGBEVF_VFRSSRK_REGS];
+       u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
 };
 
 enum ixbgevf_state_t {
index e71cdde9cb017aecab834d2f2d9c5d4821c3d42e..88298a3ef942e8200c2edaf7cce17ca40f18978d 100644 (file)
@@ -457,6 +457,32 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
        napi_gro_receive(&q_vector->napi, skb);
 }
 
+#define IXGBE_RSS_L4_TYPES_MASK \
+       ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \
+        (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP))
+
+static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring,
+                                  union ixgbe_adv_rx_desc *rx_desc,
+                                  struct sk_buff *skb)
+{
+       u16 rss_type;
+
+       if (!(ring->netdev->features & NETIF_F_RXHASH))
+               return;
+
+       rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) &
+                  IXGBE_RXDADV_RSSTYPE_MASK;
+
+       if (!rss_type)
+               return;
+
+       skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+                    (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+}
+
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
  * @ring: structure containing ring specific data
@@ -506,6 +532,7 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
                                       union ixgbe_adv_rx_desc *rx_desc,
                                       struct sk_buff *skb)
 {
+       ixgbevf_rx_hash(rx_ring, rx_desc, skb);
        ixgbevf_rx_checksum(rx_ring, rx_desc, skb);
 
        if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -648,46 +675,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
        }
 }
 
-/**
- * ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an ixgbevf specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- **/
-static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
-                             struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  * ixgbevf_cleanup_headers - Correct corrupted or empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -721,10 +708,6 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
                }
        }
 
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               ixgbevf_pull_tail(rx_ring, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
@@ -789,16 +772,19 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                                struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = IXGBEVF_RX_BUFSZ;
 #else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif
+       unsigned int pull_len;
 
-       if ((size <= IXGBEVF_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
+       if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* page is not reserved, we can reuse buffer as is */
@@ -810,8 +796,21 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                return false;
        }
 
+       /* we need the header to contain the greater of either ETH_HLEN or
+        * 60 bytes if the skb->len is less than 60 for skb_pad.
+        */
+       pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
+
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
+
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
+
+add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
        /* avoid re-using remote pages */
        if (unlikely(ixgbevf_page_is_reserved(page)))
@@ -1697,22 +1696,25 @@ static void ixgbevf_setup_vfmrqc(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vfmrqc = 0, vfreta = 0;
-       u32 rss_key[10];
        u16 rss_i = adapter->num_rx_queues;
-       int i, j;
+       u8 i, j;
 
        /* Fill out hash function seeds */
-       netdev_rss_key_fill(rss_key, sizeof(rss_key));
-       for (i = 0; i < 10; i++)
-               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+       netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key));
+       for (i = 0; i < IXGBEVF_VFRSSRK_REGS; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), adapter->rss_key[i]);
 
-       /* Fill out redirection table */
-       for (i = 0, j = 0; i < 64; i++, j++) {
+       for (i = 0, j = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++, j++) {
                if (j == rss_i)
                        j = 0;
-               vfreta = (vfreta << 8) | (j * 0x1);
-               if ((i & 3) == 3)
+
+               adapter->rss_indir_tbl[i] = j;
+
+               vfreta |= j << (i & 0x3) * 8;
+               if ((i & 3) == 3) {
                        IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), vfreta);
+                       vfreta = 0;
+               }
        }
 
        /* Perform hash on these packet types */
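The rewritten loop above fills adapter->rss_indir_tbl and packs four one-byte redirection entries into each 32-bit VFRETA register, writing the register out and clearing the accumulator every fourth entry. A compact sketch of that packing (table size and queue count chosen for illustration):

        #include <stdio.h>

        #define RETA_SIZE 64  /* as in IXGBEVF_X550_VFRETA_SIZE */

        int main(void)
        {
                unsigned int rss_i = 2;  /* pretend we spread over two RX queues */
                unsigned char indir[RETA_SIZE];
                unsigned int vfreta = 0;

                for (unsigned int i = 0, j = 0; i < RETA_SIZE; i++, j++) {
                        if (j == rss_i)
                                j = 0;
                        indir[i] = j;
                        vfreta |= j << (i & 0x3) * 8;  /* byte lane in the register */
                        if ((i & 3) == 3) {
                                printf("VFRETA[%2u] = 0x%08x\n", i >> 2, vfreta);
                                vfreta = 0;  /* start the next register */
                        }
                }
                return 0;
        }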
index 62e48bc0cb23ba98ed2cb2c447af288bd1c3906d..fe2299ac4f5c0e43b1ff3cf124381df3a1daecf1 100644 (file)
@@ -3027,8 +3027,8 @@ static int mvneta_probe(struct platform_device *pdev)
        const char *dt_mac_addr;
        char hw_mac_addr[ETH_ALEN];
        const char *mac_from;
+       const char *managed;
        int phy_mode;
-       int fixed_phy = 0;
        int err;
 
        /* Our multiqueue support is not complete, so for now, only
@@ -3062,7 +3062,6 @@ static int mvneta_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot register fixed PHY\n");
                        goto err_free_irq;
                }
-               fixed_phy = 1;
 
                /* In the case of a fixed PHY, the DT node associated
                 * to the PHY is the Ethernet MAC DT node.
@@ -3086,8 +3085,10 @@ static int mvneta_probe(struct platform_device *pdev)
        pp = netdev_priv(dev);
        pp->phy_node = phy_node;
        pp->phy_interface = phy_mode;
-       pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
-                               fixed_phy;
+
+       err = of_property_read_string(dn, "managed", &managed);
+       pp->use_inband_status = (err == 0 &&
+                                strcmp(managed, "in-band-status") == 0);
 
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
index 52a6665b7abf4f248b89f60e3408c1f020daaa85..d54701047401d73eeb54feca8cbc48c27ab5463e 100644 (file)
@@ -18,5 +18,6 @@ if NET_VENDOR_MELLANOX
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
 source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
index 38fe32ef5e5f438d713e840f6b83332f19e6a6a8..2e2a5ec509ac520bfddd3348e6fa08e373a37167 100644 (file)
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
 obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLXSW_CORE) += mlxsw/
index 99ba1c50e5851769fa58ddee4522c2e6062db0a8..f79d8124321e525b04de13e7ad1509105b789a2d 100644 (file)
@@ -102,6 +102,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 
 static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
        "blueflame",
+       "phv-bit"
 };
 
 static const char main_strings[][ETH_GSTRING_LEN] = {
@@ -1797,35 +1798,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
 static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
        bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
        bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+       bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
+       bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
        int i;
+       int ret = 0;
 
-       if (bf_enabled_new == bf_enabled_old)
-               return 0; /* Nothing to do */
+       if (bf_enabled_new != bf_enabled_old) {
+               if (bf_enabled_new) {
+                       bool bf_supported = true;
 
-       if (bf_enabled_new) {
-               bool bf_supported = true;
+                       for (i = 0; i < priv->tx_ring_num; i++)
+                               bf_supported &= priv->tx_ring[i]->bf_alloced;
 
-               for (i = 0; i < priv->tx_ring_num; i++)
-                       bf_supported &= priv->tx_ring[i]->bf_alloced;
+                       if (!bf_supported) {
+                               en_err(priv, "BlueFlame is not supported\n");
+                               return -EINVAL;
+                       }
 
-               if (!bf_supported) {
-                       en_err(priv, "BlueFlame is not supported\n");
-                       return -EINVAL;
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+               } else {
+                       priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
                }
 
-               priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-       } else {
-               priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
-       }
-
-       for (i = 0; i < priv->tx_ring_num; i++)
-               priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+               for (i = 0; i < priv->tx_ring_num; i++)
+                       priv->tx_ring[i]->bf_enabled = bf_enabled_new;
 
-       en_info(priv, "BlueFlame %s\n",
-               bf_enabled_new ?  "Enabled" : "Disabled");
+               en_info(priv, "BlueFlame %s\n",
+                       bf_enabled_new ?  "Enabled" : "Disabled");
+       }
 
+       if (phv_enabled_new != phv_enabled_old) {
+               ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
+               if (ret)
+                       return ret;
+               else if (phv_enabled_new)
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+               else
+                       priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
+               en_info(priv, "PHV bit %s\n",
+                       phv_enabled_new ?  "Enabled" : "Disabled");
+       }
        return 0;
 }
 
index e0de2fd1ce124d3d668659b89544d172164037f4..4726122ea76b296f4b45258cb8ee9358c668ff60 100644 (file)
@@ -2184,6 +2184,25 @@ static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 }
 
+static netdev_features_t mlx4_en_fix_features(struct net_device *netdev,
+                                             netdev_features_t features)
+{
+       struct mlx4_en_priv *en_priv = netdev_priv(netdev);
+       struct mlx4_en_dev *mdev = en_priv->mdev;
+
+       /* Since there is no support for separate RX C-TAG/S-TAG vlan accel
+        * enable/disable, make sure the S-TAG flag is always in the same
+        * state as C-TAG.
+        */
+       if (features & NETIF_F_HW_VLAN_CTAG_RX &&
+           !(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+               features |= NETIF_F_HW_VLAN_STAG_RX;
+       else
+               features &= ~NETIF_F_HW_VLAN_STAG_RX;
+
+       return features;
+}
+
 static int mlx4_en_set_features(struct net_device *netdev,
                netdev_features_t features)
 {
@@ -2218,6 +2237,10 @@ static int mlx4_en_set_features(struct net_device *netdev,
                en_info(priv, "Turn %s TX vlan strip offload\n",
                        (features & NETIF_F_HW_VLAN_CTAG_TX) ? "ON" : "OFF");
 
+       if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_STAG_TX))
+               en_info(priv, "Turn %s TX S-VLAN strip offload\n",
+                       (features & NETIF_F_HW_VLAN_STAG_TX) ? "ON" : "OFF");
+
        if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_LOOPBACK)) {
                en_info(priv, "Turn %s loopback\n",
                        (features & NETIF_F_LOOPBACK) ? "ON" : "OFF");
@@ -2460,6 +2483,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
        .ndo_set_features       = mlx4_en_set_features,
+       .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
@@ -2500,6 +2524,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_poll_controller    = mlx4_en_netpoll,
 #endif
        .ndo_set_features       = mlx4_en_set_features,
+       .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = mlx4_en_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
@@ -2931,6 +2956,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        dev->hw_features |= NETIF_F_LOOPBACK |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+               dev->features |= NETIF_F_HW_VLAN_STAG_RX |
+                       NETIF_F_HW_VLAN_STAG_FILTER;
+               dev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
+       }
+
+       if (mlx4_is_slave(mdev->dev)) {
+               int phv;
+
+               err = get_phv_bit(mdev->dev, port, &phv);
+               if (!err && phv) {
+                       dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+                       priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
+               }
+       } else {
+               if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+                   !(mdev->dev->caps.flags2 &
+                     MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN))
+                       dev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+       }
+
        if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
                dev->hw_features |= NETIF_F_RXFCS;
 
index 9c145dddd7175fffda22c71a8f31d4d27d807ceb..4402a1e48c9bb9d8153df9e9f9378d5e4ece38b3 100644 (file)
@@ -725,7 +725,7 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
 
        hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-       if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+       if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
            !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
                hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
                hdr += sizeof(struct vlan_hdr);
@@ -906,17 +906,25 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                gro_skb->csum_level = 1;
 
                        if ((cqe->vlan_my_qpn &
-                           cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK)) &&
+                           cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
                            (dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                                u16 vid = be16_to_cpu(cqe->sl_vid);
 
                                __vlan_hwaccel_put_tag(gro_skb, htons(ETH_P_8021Q), vid);
+                       } else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+                                 MLX4_CQE_SVLAN_PRESENT_MASK) &&
+                                (dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+                               __vlan_hwaccel_put_tag(gro_skb,
+                                                      htons(ETH_P_8021AD),
+                                                      be16_to_cpu(cqe->sl_vid));
                        }
 
                        if (dev->features & NETIF_F_RXHASH)
                                skb_set_hash(gro_skb,
                                             be32_to_cpu(cqe->immed_rss_invalid),
-                                            PKT_HASH_TYPE_L3);
+                                            (ip_summed == CHECKSUM_UNNECESSARY) ?
+                                               PKT_HASH_TYPE_L4 :
+                                               PKT_HASH_TYPE_L3);
 
                        skb_record_rx_queue(gro_skb, cq->ring);
                        skb_mark_napi_id(gro_skb, &cq->napi);
@@ -962,12 +970,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                if (dev->features & NETIF_F_RXHASH)
                        skb_set_hash(skb,
                                     be32_to_cpu(cqe->immed_rss_invalid),
-                                    PKT_HASH_TYPE_L3);
+                                    (ip_summed == CHECKSUM_UNNECESSARY) ?
+                                       PKT_HASH_TYPE_L4 :
+                                       PKT_HASH_TYPE_L3);
 
                if ((be32_to_cpu(cqe->vlan_my_qpn) &
-                   MLX4_CQE_VLAN_PRESENT_MASK) &&
+                   MLX4_CQE_CVLAN_PRESENT_MASK) &&
                    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->sl_vid));
+               else if ((be32_to_cpu(cqe->vlan_my_qpn) &
+                         MLX4_CQE_SVLAN_PRESENT_MASK) &&
+                        (dev->features & NETIF_F_HW_VLAN_STAG_RX))
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
+                                              be16_to_cpu(cqe->sl_vid));
 
                if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                        timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -1065,7 +1080,10 @@ static const int frag_sizes[] = {
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
+       /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
+        * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+        */
+       int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
        int buf_size = 0;
        int i = 0;
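The buffer sizing above now reserves room for two VLAN tags so a QinQ frame (802.1ad outer tag plus 802.1Q inner tag) still fits. The arithmetic, as a one-off sketch with the usual Ethernet header sizes stated as assumptions:

        #include <stdio.h>

        #define ETH_HLEN  14  /* dst MAC + src MAC + ethertype */
        #define VLAN_HLEN  4  /* one 802.1Q or 802.1ad tag */

        int main(void)
        {
                int mtu = 1500;
                int eff_mtu = mtu + ETH_HLEN + 2 * VLAN_HLEN;  /* outer + inner tag */

                printf("eff_mtu = %d\n", eff_mtu);  /* 1522 */
                return 0;
        }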
 
index c10d98f6ad967b13640b5d9b2fe033f377565ff0..494e7762fdb19efb83d76f187b88fd37d422d5b5 100644 (file)
@@ -718,6 +718,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
        u32 index, bf_index;
        __be32 op_own;
        u16 vlan_tag = 0;
+       u16 vlan_proto = 0;
        int i_frag;
        int lso_header_size;
        void *fragptr = NULL;
@@ -750,9 +751,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_drop;
        }
 
-       if (skb_vlan_tag_present(skb))
+       if (skb_vlan_tag_present(skb)) {
                vlan_tag = skb_vlan_tag_get(skb);
-
+               vlan_proto = be16_to_cpu(skb->vlan_proto);
+       }
 
        netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
@@ -958,8 +960,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->bf.offset ^= ring->bf.buf_size;
        } else {
                tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
-               tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
-                       !!skb_vlan_tag_present(skb);
+               if (vlan_proto == ETH_P_8021AD)
+                       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+               else if (vlan_proto == ETH_P_8021Q)
+                       tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+
                tx_desc->ctrl.fence_size = real_size;
 
                /* Ensure new descriptor hits memory
index e30bf57ad7a18ff559eb4bba122252eaf0308964..e8ec1dec5789a8d80499e8c478e4822567480284 100644 (file)
@@ -154,6 +154,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [26] = "Port ETS Scheduler support",
                [27] = "Port beacon support",
                [28] = "RX-ALL support",
+               [29] = "802.1ad offload support",
        };
        int i;
 
@@ -307,6 +308,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 
 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
 #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31)
+#define QUERY_FUNC_CAP_PHV_BIT                 0x40
 
        if (vhcr->op_modifier == 1) {
                struct mlx4_active_ports actv_ports =
@@ -351,6 +353,12 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+               if (dev->caps.phv_bit[port]) {
+                       field = QUERY_FUNC_CAP_PHV_BIT;
+                       MLX4_PUT(outbox->buf, field,
+                                QUERY_FUNC_CAP_FLAGS0_OFFSET);
+               }
+
        } else if (vhcr->op_modifier == 0) {
                struct mlx4_active_ports actv_ports =
                        mlx4_get_active_ports(dev, slave);
@@ -600,6 +608,9 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
                MLX4_GET(func_cap->phys_port_id, outbox,
                         QUERY_FUNC_CAP_PHYS_PORT_ID);
 
+       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
+       func_cap->flags |= (field & QUERY_FUNC_CAP_PHV_BIT);
+
        /* All other resources are allocated by the master, but we still report
         * 'num' and 'reserved' capabilities as follows:
         * - num remains the maximum resource index
@@ -700,6 +711,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET    0x92
 #define QUERY_DEV_CAP_BMME_FLAGS_OFFSET                0x94
 #define QUERY_DEV_CAP_CONFIG_DEV_OFFSET                0x94
+#define QUERY_DEV_CAP_PHV_EN_OFFSET            0x96
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET         0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET                0xa0
 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET                0x9c
@@ -898,6 +910,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
        if (field & (1 << 2))
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
+       MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
+       if (field & 0x80)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
+       if (field & 0x40)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;
+
        MLX4_GET(dev_cap->reserved_lkey, outbox,
                 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
        MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
@@ -1992,6 +2010,10 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
        MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
        MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+       /* phv_check enable */
+       MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
+       if (byte_field & 0x2)
+               param->phv_check_en = 1;
 out:
        mlx4_free_cmd_mailbox(dev, mailbox);
 
@@ -2758,3 +2780,63 @@ int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
                            0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
                            MLX4_CMD_NATIVE);
 }
+
+static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
+{
+#define SET_PORT_GEN_PHV_VALID 0x10
+#define SET_PORT_GEN_PHV_EN    0x80
+
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       u32 in_mod;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+
+       context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+       if (phv_bit)
+               context->phv_en |= SET_PORT_GEN_PHV_EN;
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_NATIVE);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
+{
+       int err;
+       struct mlx4_func_cap func_cap;
+
+       memset(&func_cap, 0, sizeof(func_cap));
+       err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
+       if (!err)
+               *phv = func_cap.flags & QUERY_FUNC_CAP_PHV_BIT;
+       return err;
+}
+EXPORT_SYMBOL(get_phv_bit);
+
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
+{
+       int ret;
+
+       if (mlx4_is_slave(dev))
+               return -EPERM;
+
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
+           !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
+               ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
+               if (!ret)
+                       dev->caps.phv_bit[port] = new_val;
+               return ret;
+       }
+
+       return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(set_phv_bit);
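
Taken together, get_phv_bit()/set_phv_bit() give upper layers a single knob for the 802.1ad TX offload capability queried above. A minimal sketch of a caller, assuming a PF context and only the helpers exported here (the read-back check and the -EIO choice are illustrative, not part of the driver):

    static int example_enable_qinq(struct mlx4_dev *dev, u8 port)
    {
    	int phv = 0;
    	int err;

    	/* PF-only: set_phv_bit() returns -EPERM on a VF and
    	 * -EOPNOTSUPP when the firmware lacks PHV_EN support.
    	 */
    	err = set_phv_bit(dev, port, 1);
    	if (err)
    		return err;

    	/* Read the bit back through QUERY_FUNC_CAP to confirm. */
    	err = get_phv_bit(dev, port, &phv);
    	if (!err && !phv)
    		err = -EIO;	/* illustrative: treat a mismatch as failure */
    	return err;
    }
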
index 07cb7c2461adaa90cbfab5e478a6a82d14613f87..08de5555c2f4d032bd61cf71689ef3cd0d8edd81 100644 (file)
@@ -204,6 +204,7 @@ struct mlx4_init_hca_param {
        u16 cqe_size; /* For use only when CQE stride feature enabled */
        u16 eqe_size; /* For use only when EQE stride feature enabled */
        u8 rss_ip_frags;
+       u8 phv_check_en; /* for QUERY_HCA */
 };
 
 struct mlx4_init_ib_param {
index 29c2a017a450277657a5d5e9ad4a4878575cbb2c..121c579888bba15789c96584cab45bd2c738f709 100644 (file)
@@ -405,6 +405,21 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
        dev->caps.max_rss_tbl_sz     = dev_cap->max_rss_tbl_sz;
 
+       if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
+               struct mlx4_init_hca_param hca_param;
+
+               memset(&hca_param, 0, sizeof(hca_param));
+               err = mlx4_QUERY_HCA(dev, &hca_param);
+               /* Turn off the PHV_EN flag in case phv_check_en is set.
+                * phv_check_en is a HW check that parses the packet and
+                * verifies the phv bit was reported correctly in the wqe.
+                * To allow QinQ, PHV_EN should be set and phv_check_en
+                * must be cleared; otherwise QinQ packets are dropped by HW.
+                */
+               if (err || hca_param.phv_check_en)
+                       dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
+       }
+
        /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
        if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
                dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -2912,6 +2927,8 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
 {
        u64 dev_flags = dev->flags;
        int err = 0;
+       int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
+                                       MLX4_MAX_NUM_VF);
 
        if (reset_flow) {
                dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
@@ -2937,6 +2954,12 @@ static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
        }
 
        if (!(dev->flags &  MLX4_FLAG_SRIOV)) {
+               if (total_vfs > fw_enabled_sriov_vfs) {
+                       mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR-IOV\n",
+                                total_vfs, fw_enabled_sriov_vfs);
+                       err = -ENOMEM;
+                       goto disable_sriov;
+               }
                mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
                err = pci_enable_sriov(pdev, total_vfs);
        }
@@ -3418,20 +3441,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
                        goto err_disable_pdev;
                }
        }
-       if (total_vfs >= MLX4_MAX_NUM_VF) {
+       if (total_vfs > MLX4_MAX_NUM_VF) {
                dev_err(&pdev->dev,
-                       "Requested more VF's (%d) than allowed (%d)\n",
-                       total_vfs, MLX4_MAX_NUM_VF - 1);
+                       "Requested more VFs (%d) than allowed by hw (%d)\n",
+                       total_vfs, MLX4_MAX_NUM_VF);
                err = -EINVAL;
                goto err_disable_pdev;
        }
 
        for (i = 0; i < MLX4_MAX_PORTS; i++) {
-               if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
+               if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
                        dev_err(&pdev->dev,
-                               "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
+                               "Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
                                nvfs[i] + nvfs[2], i + 1,
-                               MLX4_MAX_NUM_VF_P_PORT - 1);
+                               MLX4_MAX_NUM_VF_P_PORT);
                        err = -EINVAL;
                        goto err_disable_pdev;
                }
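
Both limit checks above move from >= to >, so requesting exactly MLX4_MAX_NUM_VF (or MLX4_MAX_NUM_VF_P_PORT per port) is now accepted; the old comparison silently reserved one slot below the advertised maximum. A standalone illustration of the boundary change, with a stand-in constant:

    #include <stdbool.h>

    #define MAX_VFS 64	/* stand-in for MLX4_MAX_NUM_VF */

    /* Old check: total_vfs == MAX_VFS was rejected, capping usable VFs
     * at MAX_VFS - 1 even though the maximum itself is a valid request.
     */
    static bool old_rejects(int total_vfs) { return total_vfs >= MAX_VFS; }

    /* New check: the full advertised maximum passes. */
    static bool new_rejects(int total_vfs) { return total_vfs > MAX_VFS; }
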
index a092c5c34d4375df330c1f8fea2b01d2545580dd..232b2b55f23b9170b32f351926c200b6ac0e7f1c 100644 (file)
@@ -787,6 +787,9 @@ struct mlx4_set_port_general_context {
        u8 pprx;
        u8 pfcrx;
        u16 reserved4;
+       u32 reserved5;
+       u8 phv_en;
+       u8 reserved6[3];
 };
 
 struct mlx4_set_port_rqp_calc_context {
index 666d1669eb5233f9a8e6baf5773621159375af25..defcf8c395bface7f024043cc51484bd7a4f3820 100644 (file)
@@ -95,6 +95,7 @@
  */
 
 #define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+#define MLX4_EN_PRIV_FLAGS_PHV      2
 
 #define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
 
index 0715b497511f6c861f5ac027341960fdc0acfab5..6cb38304669f6e5618edfea860a8c8d5f49e5c54 100644 (file)
  * register it in a memory region at HCA virtual address 0.
  */
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+                                          size_t size, dma_addr_t *dma_handle,
+                                          int node)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       int original_node;
+       void *cpu_handle;
+
+       mutex_lock(&priv->alloc_mutex);
+       original_node = dev_to_node(&dev->pdev->dev);
+       set_dev_node(&dev->pdev->dev, node);
+       cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+                                        dma_handle, GFP_KERNEL);
+       set_dev_node(&dev->pdev->dev, original_node);
+       mutex_unlock(&priv->alloc_mutex);
+       return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                       struct mlx5_buf *buf, int node)
 {
        dma_addr_t t;
 
        buf->size = size;
        buf->npages       = 1;
        buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-       buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
-                                               size, &t, GFP_KERNEL);
+       buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+                                                         &t, node);
        if (!buf->direct.buf)
                return -ENOMEM;
 
@@ -66,6 +85,11 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
 
        return 0;
 }
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+       return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
 
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
@@ -75,7 +99,8 @@ void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+                                                int node)
 {
        struct mlx5_db_pgdir *pgdir;
 
@@ -84,8 +109,9 @@ static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
                return NULL;
 
        bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
-       pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
-                                           &pgdir->db_dma, GFP_KERNEL);
+
+       pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+                                                      &pgdir->db_dma, node);
        if (!pgdir->db_page) {
                kfree(pgdir);
                return NULL;
@@ -118,7 +144,7 @@ static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
        return 0;
 }
 
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
 {
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;
@@ -129,7 +155,7 @@ int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;
 
-       pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
+       pgdir = mlx5_alloc_db_pgdir(dev, node);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
@@ -145,6 +171,12 @@ out:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+       return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
 EXPORT_SYMBOL_GPL(mlx5_db_alloc);
 
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
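
mlx5_dma_zalloc_coherent_node() above steers coherent DMA memory to a chosen NUMA node by temporarily retargeting the device's node, since dma_zalloc_coherent() allocates from dev_to_node(). A condensed sketch of the pattern, assuming the caller owns a suitable mutex (the names here are generic, not the driver's):

    static void *alloc_coherent_on_node(struct device *dev, struct mutex *lock,
    				    size_t size, dma_addr_t *dma, int node)
    {
    	int saved;
    	void *buf;

    	/* The lock keeps concurrent allocators from observing the
    	 * temporarily rewritten device node.
    	 */
    	mutex_lock(lock);
    	saved = dev_to_node(dev);
    	set_dev_node(dev, node);
    	buf = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
    	set_dev_node(dev, saved);
    	mutex_unlock(lock);

    	return buf;
    }
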
index 3d23bd657e3c0cf7dc6d1d61c530a2d15053712a..45f6dc75c0df99c9d7a8f92c976bd014bdad24f1 100644 (file)
@@ -60,6 +60,7 @@
 
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
 
 static const char vport_strings[][ETH_GSTRING_LEN] = {
        /* vport statistics */
@@ -195,6 +196,8 @@ struct mlx5e_params {
        u16 rx_hash_log_tbl_sz;
        bool lro_en;
        u32 lro_wqe_sz;
+       u8  rss_hfunc;
+       u16 tx_max_inline;
 };
 
 enum {
@@ -266,7 +269,9 @@ struct mlx5e_sq {
        /* dirtied @xmit */
        u16                        pc ____cacheline_aligned_in_smp;
        u32                        dma_fifo_pc;
-       u32                        bf_offset;
+       u16                        bf_offset;
+       u16                        prev_cc;
+       u8                         bf_budget;
        struct mlx5e_sq_stats      stats;
 
        struct mlx5e_cq            cq;
@@ -279,9 +284,10 @@ struct mlx5e_sq {
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
        void __iomem              *uar_map;
+       void __iomem              *uar_bf_map;
        struct netdev_queue       *txq;
        u32                        sqn;
-       u32                        bf_buf_size;
+       u16                        bf_buf_size;
        u16                        max_inline;
        u16                        edge;
        struct device             *pdev;
@@ -324,14 +330,18 @@ struct mlx5e_channel {
 };
 
 enum mlx5e_traffic_types {
-       MLX5E_TT_IPV4_TCP = 0,
-       MLX5E_TT_IPV6_TCP = 1,
-       MLX5E_TT_IPV4_UDP = 2,
-       MLX5E_TT_IPV6_UDP = 3,
-       MLX5E_TT_IPV4     = 4,
-       MLX5E_TT_IPV6     = 5,
-       MLX5E_TT_ANY      = 6,
-       MLX5E_NUM_TT      = 7,
+       MLX5E_TT_IPV4_TCP,
+       MLX5E_TT_IPV6_TCP,
+       MLX5E_TT_IPV4_UDP,
+       MLX5E_TT_IPV6_UDP,
+       MLX5E_TT_IPV4_IPSEC_AH,
+       MLX5E_TT_IPV6_IPSEC_AH,
+       MLX5E_TT_IPV4_IPSEC_ESP,
+       MLX5E_TT_IPV6_IPSEC_ESP,
+       MLX5E_TT_IPV4,
+       MLX5E_TT_IPV6,
+       MLX5E_TT_ANY,
+       MLX5E_NUM_TT,
 };
 
 enum {
@@ -379,7 +389,6 @@ struct mlx5e_flow_table {
 
 struct mlx5e_priv {
        /* priv data path fields - start */
-       int                        num_tc;
        int                        default_vlan_prio;
        struct mlx5e_sq            **txq_to_sq_map;
        /* priv data path fields - end */
@@ -487,12 +496,12 @@ void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-                            struct mlx5e_params *new_params);
 
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-                                     struct mlx5e_tx_wqe *wqe)
+                                     struct mlx5e_tx_wqe *wqe, int bf_sz)
 {
+       u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
 
@@ -503,9 +512,15 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
         */
        wmb();
 
-       mlx5_write64((__be32 *)&wqe->ctrl,
-                    sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
-                    NULL);
+       if (bf_sz) {
+               __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz);
+
+               /* flush the write-combining mapped buffer */
+               wmb();
+
+       } else {
+               mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+       }
 
        sq->bf_offset ^= sq->bf_buf_size;
 }
@@ -519,3 +534,4 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
 }
 
 extern const struct ethtool_ops mlx5e_ethtool_ops;
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
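
mlx5e_tx_notify_hw() now takes a bf_sz argument: when non-zero, the WQE is pushed through the write-combining BlueFlame mapping with __iowrite64_copy() (which counts 64-bit words), sparing the device a DMA read of the descriptor; when zero, it falls back to the plain 64-bit doorbell. A hedged sketch of how a transmit path might choose, using the per-SQ bf_budget defined above (the helper name is illustrative):

    static void example_tx_doorbell(struct mlx5e_sq *sq,
    				struct mlx5e_tx_wqe *wqe,
    				unsigned int wqe_bytes)
    {
    	int bf_sz = 0;

    	/* BlueFlame only for WQEs that fit one half-buffer, and only
    	 * while the budget lasts, so bursts fall back to the doorbell.
    	 */
    	if (sq->bf_budget && wqe_bytes <= sq->bf_buf_size) {
    		bf_sz = wqe_bytes >> 3;	/* bytes -> 64-bit words */
    		sq->bf_budget--;
    	}

    	mlx5e_tx_notify_hw(sq, wqe, bf_sz);
    }
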
index 388938482ff99dabdd0f95229c738dfd915a0c7f..b95aa3384c367cda65fd6a875553cad0c8638d69 100644 (file)
@@ -173,7 +173,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return NUM_VPORT_COUNTERS +
                       priv->params.num_channels * NUM_RQ_STATS +
-                      priv->params.num_channels * priv->num_tc *
+                      priv->params.num_channels * priv->params.num_tc *
                                                   NUM_SQ_STATS;
        /* fallthrough */
        default:
@@ -207,7 +207,7 @@ static void mlx5e_get_strings(struct net_device *dev,
                                        "rx%d_%s", i, rq_stats_strings[j]);
 
                for (i = 0; i < priv->params.num_channels; i++)
-                       for (tc = 0; tc < priv->num_tc; tc++)
+                       for (tc = 0; tc < priv->params.num_tc; tc++)
                                for (j = 0; j < NUM_SQ_STATS; j++)
                                        sprintf(data +
                                                (idx++) * ETH_GSTRING_LEN,
@@ -242,7 +242,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                                       ((u64 *)&priv->channel[i]->rq.stats)[j];
 
        for (i = 0; i < priv->params.num_channels; i++)
-               for (tc = 0; tc < priv->num_tc; tc++)
+               for (tc = 0; tc < priv->params.num_tc; tc++)
                        for (j = 0; j < NUM_SQ_STATS; j++)
                                data[idx++] = !test_bit(MLX5E_STATE_OPENED,
                                                        &priv->state) ? 0 :
@@ -264,7 +264,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *param)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_params new_params;
+       bool was_opened;
        u16 min_rx_wqes;
        u8 log_rq_size;
        u8 log_sq_size;
@@ -316,11 +316,18 @@ static int mlx5e_set_ringparam(struct net_device *dev,
                return 0;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
-       new_params.log_rq_size = log_rq_size;
-       new_params.log_sq_size = log_sq_size;
-       new_params.min_rx_wqes = min_rx_wqes;
-       err = mlx5e_update_priv_params(priv, &new_params);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(dev);
+
+       priv->params.log_rq_size = log_rq_size;
+       priv->params.log_sq_size = log_sq_size;
+       priv->params.min_rx_wqes = min_rx_wqes;
+
+       if (was_opened)
+               err = mlx5e_open_locked(dev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -342,7 +349,7 @@ static int mlx5e_set_channels(struct net_device *dev,
        struct mlx5e_priv *priv = netdev_priv(dev);
        int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
        unsigned int count = ch->combined_count;
-       struct mlx5e_params new_params;
+       bool was_opened;
        int err = 0;
 
        if (!count) {
@@ -365,9 +372,16 @@ static int mlx5e_set_channels(struct net_device *dev,
                return 0;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
-       new_params.num_channels = count;
-       err = mlx5e_update_priv_params(priv, &new_params);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(dev);
+
+       priv->params.num_channels = count;
+
+       if (was_opened)
+               err = mlx5e_open_locked(dev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -662,6 +676,101 @@ out:
        return err;
 }
 
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+                         u8 *hfunc)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+
+       if (hfunc)
+               *hfunc = priv->params.rss_hfunc;
+
+       return 0;
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+                         const u8 *key, const u8 hfunc)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       if (hfunc == ETH_RSS_HASH_NO_CHANGE)
+               return 0;
+
+       if ((hfunc != ETH_RSS_HASH_XOR) &&
+           (hfunc != ETH_RSS_HASH_TOP))
+               return -EINVAL;
+
+       mutex_lock(&priv->state_lock);
+
+       priv->params.rss_hfunc = hfunc;
+       if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               mlx5e_close_locked(dev);
+               err = mlx5e_open_locked(dev);
+       }
+
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+                            const struct ethtool_tunable *tuna,
+                            void *data)
+{
+       const struct mlx5e_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       switch (tuna->id) {
+       case ETHTOOL_TX_COPYBREAK:
+               *(u32 *)data = priv->params.tx_max_inline;
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+                            const struct ethtool_tunable *tuna,
+                            const void *data)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       bool was_opened;
+       u32 val;
+       int err = 0;
+
+       switch (tuna->id) {
+       case ETHTOOL_TX_COPYBREAK:
+               val = *(u32 *)data;
+               if (val > mlx5e_get_max_inline_cap(mdev)) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               mutex_lock(&priv->state_lock);
+
+               was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+               if (was_opened)
+                       mlx5e_close_locked(dev);
+
+               priv->params.tx_max_inline = val;
+
+               if (was_opened)
+                       err = mlx5e_open_locked(dev);
+
+               mutex_unlock(&priv->state_lock);
+               break;
+       default:
+               err = -EINVAL;
+               break;
+       }
+
+       return err;
+}
+
 const struct ethtool_ops mlx5e_ethtool_ops = {
        .get_drvinfo       = mlx5e_get_drvinfo,
        .get_link          = ethtool_op_get_link,
@@ -676,4 +785,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
        .set_coalesce      = mlx5e_set_coalesce,
        .get_settings      = mlx5e_get_settings,
        .set_settings      = mlx5e_set_settings,
+       .get_rxfh          = mlx5e_get_rxfh,
+       .set_rxfh          = mlx5e_set_rxfh,
+       .get_tunable       = mlx5e_get_tunable,
+       .set_tunable       = mlx5e_set_tunable,
 };
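
Every setter above follows the same pattern that replaced mlx5e_update_priv_params(): under priv->state_lock, close the channels if the device is open, mutate priv->params in place, then reopen. A generic sketch of the idiom (the wrapper itself is illustrative):

    static int example_apply_param(struct net_device *dev,
    			       void (*mutate)(struct mlx5e_priv *priv))
    {
    	struct mlx5e_priv *priv = netdev_priv(dev);
    	bool was_opened;
    	int err = 0;

    	mutex_lock(&priv->state_lock);

    	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
    	if (was_opened)
    		mlx5e_close_locked(dev);

    	mutate(priv);		/* e.g. set params.tx_max_inline */

    	if (was_opened)
    		err = mlx5e_open_locked(dev);

    	mutex_unlock(&priv->state_lock);
    	return err;
    }

A closed device simply records the new values and picks them up on the next open.
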
index 120db80c47aac425bf3c9d2eaac8b6ccd4cd674b..70ec31b9e1e96b135829430df45a416444a21a11 100644 (file)
@@ -105,25 +105,41 @@ static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
 {
        void *ft = priv->ft.main;
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
+               mlx5_del_flow_table_entry(ft,
+                                         ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH]);
+
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+       if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
 
-       if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+       if (ai->tt_vec & BIT(MLX5E_TT_ANY))
                mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
 }
 
@@ -156,33 +172,37 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
                switch (eth_addr_type) {
                case MLX5E_UC:
                        ret =
-                               (1 << MLX5E_TT_IPV4_TCP) |
-                               (1 << MLX5E_TT_IPV6_TCP) |
-                               (1 << MLX5E_TT_IPV4_UDP) |
-                               (1 << MLX5E_TT_IPV6_UDP) |
-                               (1 << MLX5E_TT_IPV4)     |
-                               (1 << MLX5E_TT_IPV6)     |
-                               (1 << MLX5E_TT_ANY)      |
+                               BIT(MLX5E_TT_IPV4_TCP)       |
+                               BIT(MLX5E_TT_IPV6_TCP)       |
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                               BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                               BIT(MLX5E_TT_IPV4)           |
+                               BIT(MLX5E_TT_IPV6)           |
+                               BIT(MLX5E_TT_ANY)            |
                                0;
                        break;
 
                case MLX5E_MC_IPV4:
                        ret =
-                               (1 << MLX5E_TT_IPV4_UDP) |
-                               (1 << MLX5E_TT_IPV4)     |
+                               BIT(MLX5E_TT_IPV4_UDP)       |
+                               BIT(MLX5E_TT_IPV4)           |
                                0;
                        break;
 
                case MLX5E_MC_IPV6:
                        ret =
-                               (1 << MLX5E_TT_IPV6_UDP) |
-                               (1 << MLX5E_TT_IPV6)     |
+                               BIT(MLX5E_TT_IPV6_UDP)       |
+                               BIT(MLX5E_TT_IPV6)           |
                                0;
                        break;
 
                case MLX5E_MC_OTHER:
                        ret =
-                               (1 << MLX5E_TT_ANY)      |
+                               BIT(MLX5E_TT_ANY)            |
                                0;
                        break;
                }
@@ -191,23 +211,27 @@ static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
 
        case MLX5E_ALLMULTI:
                ret =
-                       (1 << MLX5E_TT_IPV4_UDP) |
-                       (1 << MLX5E_TT_IPV6_UDP) |
-                       (1 << MLX5E_TT_IPV4)     |
-                       (1 << MLX5E_TT_IPV6)     |
-                       (1 << MLX5E_TT_ANY)      |
+                       BIT(MLX5E_TT_IPV4_UDP) |
+                       BIT(MLX5E_TT_IPV6_UDP) |
+                       BIT(MLX5E_TT_IPV4)     |
+                       BIT(MLX5E_TT_IPV6)     |
+                       BIT(MLX5E_TT_ANY)      |
                        0;
                break;
 
        default: /* MLX5E_PROMISC */
                ret =
-                       (1 << MLX5E_TT_IPV4_TCP) |
-                       (1 << MLX5E_TT_IPV6_TCP) |
-                       (1 << MLX5E_TT_IPV4_UDP) |
-                       (1 << MLX5E_TT_IPV6_UDP) |
-                       (1 << MLX5E_TT_IPV4)     |
-                       (1 << MLX5E_TT_IPV6)     |
-                       (1 << MLX5E_TT_ANY)      |
+                       BIT(MLX5E_TT_IPV4_TCP)       |
+                       BIT(MLX5E_TT_IPV6_TCP)       |
+                       BIT(MLX5E_TT_IPV4_UDP)       |
+                       BIT(MLX5E_TT_IPV6_UDP)       |
+                       BIT(MLX5E_TT_IPV4_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV6_IPSEC_AH)  |
+                       BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
+                       BIT(MLX5E_TT_IPV4)           |
+                       BIT(MLX5E_TT_IPV6)           |
+                       BIT(MLX5E_TT_ANY)            |
                        0;
                break;
        }
@@ -226,6 +250,7 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
        u8   *match_criteria_dmac;
        void *ft   = priv->ft.main;
        u32  *tirn = priv->tirn;
+       u32  *ft_ix;
        u32  tt_vec;
        int  err;
 
@@ -261,51 +286,51 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
 
        tt_vec = mlx5e_get_tt_vec(ai, type);
 
-       if (tt_vec & (1 << MLX5E_TT_ANY)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_ANY];
+       if (tt_vec & BIT(MLX5E_TT_ANY)) {
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_ANY]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_ANY]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_ANY);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_ANY);
        }
 
        match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                         outer_headers.ethertype);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4];
+       if (tt_vec & BIT(MLX5E_TT_IPV4)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6];
+       if (tt_vec & BIT(MLX5E_TT_IPV6)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6);
        }
 
        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
@@ -313,70 +338,141 @@ static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
        MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
                 IPPROTO_UDP);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_UDP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4_UDP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_UDP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6_UDP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
        }
 
        MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
                 IPPROTO_TCP);
 
-       if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_TCP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IP);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV4_TCP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
        }
 
-       if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_TCP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
                MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
                         ETH_P_IPV6);
                MLX5_SET(dest_format_struct, dest, destination_id,
                         tirn[MLX5E_TT_IPV6_TCP]);
                err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
                                                match_criteria, flow_context,
-                                               &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
-               if (err) {
-                       mlx5e_del_eth_addr_from_flow_table(priv, ai);
-                       return err;
-               }
-               ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_AH);
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_AH];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_IPSEC_AH]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
+       }
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_AH];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_IPSEC_AH]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
+       }
+
+       MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+                IPPROTO_ESP);
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV4_IPSEC_ESP];
+       if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IP);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV4_IPSEC_ESP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
+       }
+
+       ft_ix = &ai->ft_ix[MLX5E_TT_IPV6_IPSEC_ESP];
+       if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
+               MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+                        ETH_P_IPV6);
+               MLX5_SET(dest_format_struct, dest, destination_id,
+                        tirn[MLX5E_TT_IPV6_IPSEC_ESP]);
+               err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+                                               match_criteria, flow_context,
+                                               ft_ix);
+               if (err)
+                       goto err_del_ai;
+
+               ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
        }
 
        return 0;
+
+err_del_ai:
+       mlx5e_del_eth_addr_from_flow_table(priv, ai);
+
+       return err;
 }
 
 static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
@@ -725,7 +821,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
        if (!g)
                return -ENOMEM;
 
-       g[0].log_sz = 2;
+       g[0].log_sz = 3;
        g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
                         outer_headers.ethertype);
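
The err_del_ai refactor above collapses many copies of "delete everything and return" into one label; since mlx5e_del_eth_addr_from_flow_table() walks ai->tt_vec, the unwind removes exactly the entries that were installed. The first flow-table group's log_sz also grows from 2 to 3, doubling its capacity for the new IPsec entries. A generic sketch of the unwinding idiom (example_add_rule() is a hypothetical stand-in for the per-type MLX5_SET/mlx5_add_flow_table_entry sequence):

    static int example_install_rules(struct mlx5e_priv *priv,
    				 struct mlx5e_eth_addr_info *ai, u32 tt_vec)
    {
    	int tt;
    	int err;

    	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
    		if (!(tt_vec & BIT(tt)))
    			continue;
    		err = example_add_rule(priv, ai, tt);	/* hypothetical */
    		if (err)
    			goto err_del_ai;
    		ai->tt_vec |= BIT(tt);	/* mark for the unwinder */
    	}

    	return 0;

    err_del_ai:
    	mlx5e_del_eth_addr_from_flow_table(priv, ai);
    	return err;
    }
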
index 40206da1f9d7b9e24ebd972e2b798f05dc4be46b..bb815893d3a8b40cb22f0a938b14487b028a9f56 100644 (file)
@@ -41,6 +41,7 @@ struct mlx5e_rq_param {
 struct mlx5e_sq_param {
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
+       u16                        max_inline;
 };
 
 struct mlx5e_cq_param {
@@ -116,7 +117,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_wqe_err   += rq_stats->wqe_err;
 
-               for (j = 0; j < priv->num_tc; j++) {
+               for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;
 
                        s->tso_packets          += sq_stats->tso_packets;
@@ -272,6 +273,8 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        int err;
        int i;
 
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+
        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
@@ -342,11 +345,11 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
 
        memcpy(rqc, param->rqc, sizeof(param->rqc));
 
-       MLX5_SET(rqc,  rqc, cqn,                c->rq.cq.mcq.cqn);
+       MLX5_SET(rqc,  rqc, cqn,                rq->cq.mcq.cqn);
        MLX5_SET(rqc,  rqc, state,              MLX5_RQC_STATE_RST);
        MLX5_SET(rqc,  rqc, flush_in_error_en,  1);
        MLX5_SET(wq,   wq,  log_wq_pg_sz,       rq->wq_ctrl.buf.page_shift -
-                                               PAGE_SHIFT);
+                                               MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
 
        mlx5_fill_page_array(&rq->wq_ctrl.buf,
@@ -502,6 +505,8 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        if (err)
                return err;
 
+       param->wq.db_numa_node = cpu_to_node(c->cpu);
+
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
@@ -509,7 +514,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
        sq->uar_map     = sq->uar.map;
+       sq->uar_bf_map  = sq->uar.bf_map;
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+       sq->max_inline  = param->max_inline;
 
        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -518,11 +525,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        txq_ix = c->ix + tc * priv->params.num_channels;
        sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
 
-       sq->pdev    = c->pdev;
-       sq->mkey_be = c->mkey_be;
-       sq->channel = c;
-       sq->tc      = tc;
-       sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+       sq->pdev      = c->pdev;
+       sq->mkey_be   = c->mkey_be;
+       sq->channel   = c;
+       sq->tc        = tc;
+       sq->edge      = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
+       sq->bf_budget = MLX5E_SQ_BF_BUDGET;
        priv->txq_to_sq_map[txq_ix] = sq;
 
        return 0;
@@ -569,7 +577,6 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
 
        memcpy(sqc, param->sqc, sizeof(param->sqc));
 
-       MLX5_SET(sqc,  sqc, user_index,         sq->tc);
        MLX5_SET(sqc,  sqc, tis_num_0,          priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                c->sq[sq->tc].cq.mcq.cqn);
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
@@ -579,7 +586,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-                                         PAGE_SHIFT);
+                                         MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
 
        mlx5_fill_page_array(&sq->wq_ctrl.buf,
@@ -702,7 +709,8 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        int err;
        u32 i;
 
-       param->wq.numa = cpu_to_node(c->cpu);
+       param->wq.buf_numa_node = cpu_to_node(c->cpu);
+       param->wq.db_numa_node  = cpu_to_node(c->cpu);
        param->eq_ix   = c->ix;
 
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
@@ -773,7 +781,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-                                           PAGE_SHIFT);
+                                           MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
        err = mlx5_core_create_cq(mdev, mcq, in, inlen);
@@ -929,7 +937,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
        c->mkey_be  = cpu_to_be32(priv->mr.key);
-       c->num_tc   = priv->num_tc;
+       c->num_tc   = priv->params.num_tc;
 
        mlx5e_build_tc_to_txq_map(c, priv->params.num_channels);
 
@@ -1000,7 +1008,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
        MLX5_SET(wq, wq, pd,               priv->pdn);
 
-       param->wq.numa   = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
        param->wq.linear = 1;
 }
 
@@ -1014,7 +1022,8 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
        MLX5_SET(wq, wq, pd,            priv->pdn);
 
-       param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+       param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+       param->max_inline = priv->params.tx_max_inline;
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1059,27 +1068,28 @@ static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
 static int mlx5e_open_channels(struct mlx5e_priv *priv)
 {
        struct mlx5e_channel_param cparam;
+       int nch = priv->params.num_channels;
        int err = -ENOMEM;
        int i;
        int j;
 
-       priv->channel = kcalloc(priv->params.num_channels,
-                               sizeof(struct mlx5e_channel *), GFP_KERNEL);
+       priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+                               GFP_KERNEL);
 
-       priv->txq_to_sq_map = kcalloc(priv->params.num_channels * priv->num_tc,
+       priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
                                      sizeof(struct mlx5e_sq *), GFP_KERNEL);
 
        if (!priv->channel || !priv->txq_to_sq_map)
                goto err_free_txq_to_sq_map;
 
        mlx5e_build_channel_param(priv, &cparam);
-       for (i = 0; i < priv->params.num_channels; i++) {
+       for (i = 0; i < nch; i++) {
                err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
                if (err)
                        goto err_close_channels;
        }
 
-       for (j = 0; j < priv->params.num_channels; j++) {
+       for (j = 0; j < nch; j++) {
                err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
                if (err)
                        goto err_close_channels;
@@ -1130,11 +1140,10 @@ static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
 
 static int mlx5e_open_tises(struct mlx5e_priv *priv)
 {
-       int num_tc = priv->num_tc;
        int err;
        int tc;
 
-       for (tc = 0; tc < num_tc; tc++) {
+       for (tc = 0; tc < priv->params.num_tc; tc++) {
                err = mlx5e_open_tis(priv, tc);
                if (err)
                        goto err_close_tises;
@@ -1151,26 +1160,41 @@ err_close_tises:
 
 static void mlx5e_close_tises(struct mlx5e_priv *priv)
 {
-       int num_tc = priv->num_tc;
        int tc;
 
-       for (tc = 0; tc < num_tc; tc++)
+       for (tc = 0; tc < priv->params.num_tc; tc++)
                mlx5e_close_tis(priv, tc);
 }
 
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+       return (hfunc == ETH_RSS_HASH_TOP) ?
+              MLX5_RX_HASH_FN_TOEPLITZ :
+              MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+       int inv = 0;
+       int i;
+
+       for (i = 0; i < size; i++)
+               inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+       return inv;
+}
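
mlx5e_bits_invert() bit-reverses the table index before the modulo in mlx5e_open_rqt(), which spreads the inverted-XOR8 hash's structured outputs more evenly across channels. A userspace re-statement with a worked 3-bit example:

    #include <assert.h>

    /* Re-statement of mlx5e_bits_invert() for illustration only. */
    static int bits_invert(unsigned long a, int size)
    {
    	int inv = 0, i;

    	for (i = 0; i < size; i++)
    		inv |= ((a >> (size - i - 1)) & 1) << i;
    	return inv;
    }

    int main(void)
    {
    	assert(bits_invert(1, 3) == 4);	/* 0b001 -> 0b100 */
    	assert(bits_invert(3, 3) == 6);	/* 0b011 -> 0b110 */
    	assert(bits_invert(5, 3) == 5);	/* palindrome stays put */
    	return 0;
    }
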
+
 static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 *in;
-       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
        void *rqtc;
        int inlen;
        int err;
-       int sz;
+       int log_tbl_sz = priv->params.rx_hash_log_tbl_sz;
+       int sz = 1 << log_tbl_sz;
        int i;
 
-       sz = 1 << priv->params.rx_hash_log_tbl_sz;
-
        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = mlx5_vzalloc(inlen);
        if (!in)
@@ -1182,17 +1206,16 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
        for (i = 0; i < sz; i++) {
-               int ix = i % priv->params.num_channels;
+               int ix = i;
 
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+                       ix = mlx5e_bits_invert(i, log_tbl_sz);
+
+               ix = ix % priv->params.num_channels;
                MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
        }
 
-       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-
-       memset(out, 0, sizeof(out));
-       err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
-       if (!err)
-               priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+       err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn);
 
        kvfree(in);
 
@@ -1201,16 +1224,7 @@ static int mlx5e_open_rqt(struct mlx5e_priv *priv)
 
 static void mlx5e_close_rqt(struct mlx5e_priv *priv)
 {
-       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
-       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
-
-       memset(in, 0, sizeof(in));
-
-       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-       MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
-
-       mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
-                                  sizeof(out));
+       mlx5_core_destroy_rqt(priv->mdev, priv->rqtn);
 }
 
 static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
@@ -1221,13 +1235,17 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 
 #define ROUGH_MAX_L2_L3_HDR_SZ 256
 
-#define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                         MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
 
-#define MLX5_HASH_ALL    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                         MLX5_HASH_FIELD_SEL_DST_IP   |\
-                         MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                         MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
        if (priv->params.lro_en) {
                MLX5_SET(tirc, tirc, lro_enable_mask,
@@ -1254,12 +1272,16 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(tirc, tirc, indirect_table,
                         priv->rqtn);
                MLX5_SET(tirc, tirc, rx_hash_fn,
-                        MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
-               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-               netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
-                                                rx_hash_toeplitz_key),
-                                   MLX5_FLD_SZ_BYTES(tirc,
-                                                     rx_hash_toeplitz_key));
+                        mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+               if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+                       void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+                                                    rx_hash_toeplitz_key);
+                       size_t len = MLX5_FLD_SZ_BYTES(tirc,
+                                                      rx_hash_toeplitz_key);
+
+                       MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+                       netdev_rss_key_fill(rss_key, len);
+               }
                break;
        }
 
@@ -1270,7 +1292,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV6_TCP:
@@ -1279,7 +1301,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_TCP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV4_UDP:
@@ -1288,7 +1310,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
                break;
 
        case MLX5E_TT_IPV6_UDP:
@@ -1297,7 +1319,35 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
                         MLX5_L4_PROT_TYPE_UDP);
                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_ALL);
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
                break;
 
        case MLX5E_TT_IPV4:
@@ -1520,26 +1570,6 @@ static int mlx5e_close(struct net_device *netdev)
        return err;
 }
 
-int mlx5e_update_priv_params(struct mlx5e_priv *priv,
-                            struct mlx5e_params *new_params)
-{
-       int err = 0;
-       int was_opened;
-
-       WARN_ON(!mutex_is_locked(&priv->state_lock));
-
-       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-       if (was_opened)
-               mlx5e_close_locked(priv->netdev);
-
-       priv->params = *new_params;
-
-       if (was_opened)
-               err = mlx5e_open_locked(priv->netdev);
-
-       return err;
-}
-
 static struct rtnl_link_stats64 *
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -1589,20 +1619,22 @@ static int mlx5e_set_features(struct net_device *netdev,
                              netdev_features_t features)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
        netdev_features_t changes = features ^ netdev->features;
-       struct mlx5e_params new_params;
-       bool update_params = false;
 
        mutex_lock(&priv->state_lock);
-       new_params = priv->params;
 
        if (changes & NETIF_F_LRO) {
-               new_params.lro_en = !!(features & NETIF_F_LRO);
-               update_params = true;
-       }
+               bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+               if (was_opened)
+                       mlx5e_close_locked(priv->netdev);
 
-       if (update_params)
-               mlx5e_update_priv_params(priv, &new_params);
+               priv->params.lro_en = !!(features & NETIF_F_LRO);
+
+               if (was_opened)
+                       err = mlx5e_open_locked(priv->netdev);
+       }
 
        if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
                if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1620,8 +1652,9 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       bool was_opened;
        int max_mtu;
-       int err;
+       int err = 0;
 
        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
@@ -1633,8 +1666,16 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        }
 
        mutex_lock(&priv->state_lock);
+
+       was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (was_opened)
+               mlx5e_close_locked(netdev);
+
        netdev->mtu = new_mtu;
-       err = mlx5e_update_priv_params(priv, &priv->params);
+
+       if (was_opened)
+               err = mlx5e_open_locked(netdev);
+
        mutex_unlock(&priv->state_lock);
 
        return err;
@@ -1673,6 +1714,15 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
        return 0;
 }
 
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+       int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+       return bf_buf_size -
+              sizeof(struct mlx5e_tx_wqe) +
+              2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
+}
+
 static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                                    struct net_device *netdev,
                                    int num_comp_vectors)
@@ -1691,6 +1741,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation_pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+       priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
        priv->params.min_rx_wqes           =
                MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
        priv->params.rx_hash_log_tbl_sz    =
@@ -1700,6 +1751,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
                MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
        priv->params.num_tc                = 1;
        priv->params.default_vlan_prio     = 0;
+       priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
        priv->params.lro_en = false && !!MLX5_CAP_ETH(priv->mdev, lro_cap);
        priv->params.lro_wqe_sz            =
@@ -1708,7 +1760,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
        priv->mdev                         = mdev;
        priv->netdev                       = netdev;
        priv->params.num_channels          = num_comp_vectors;
-       priv->num_tc                       = priv->params.num_tc;
        priv->default_vlan_prio            = priv->params.default_vlan_prio;
 
        spin_lock_init(&priv->async_events_spinlock);
@@ -1733,9 +1784,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 
        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-       if (priv->num_tc > 1) {
+       if (priv->params.num_tc > 1)
                mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
-       }
 
        netdev->netdev_ops        = &mlx5e_netdev_ops;
        netdev->watchdog_timeo    = 15 * HZ;
@@ -1819,36 +1869,31 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
                goto err_free_netdev;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
                goto err_unmap_free_uar;
        }
 
        err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
        if (err) {
-               netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "alloc td failed, %d\n", err);
                goto err_dealloc_pd;
        }
 
        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
-               netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
        }
 
        err = register_netdev(netdev);
        if (err) {
-               netdev_err(netdev, "%s: register_netdev failed, %d\n",
-                          __func__, err);
+               mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_destroy_mkey;
        }
 
index 03f28f438e55ab690cc865b6bc3509a53d280e25..64380bc0cd6a5df34b99531c0565718a4d2b207c 100644 (file)
@@ -57,7 +57,7 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
 
        if (notify_hw) {
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe);
+               mlx5e_tx_notify_hw(sq, wqe, 0);
        }
 }
 
@@ -110,9 +110,17 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 }
 
 static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-                                           struct sk_buff *skb)
+                                           struct sk_buff *skb, bool bf)
 {
-#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+       /* Some NIC TX decisions, e.g. loopback, are based on the packet
+        * headers and occur before the data gather.
+        * Therefore these headers must be copied into the WQE.
+        */
+#define MLX5E_MIN_INLINE (ETH_HLEN + 2/*vlan tag*/)
+
+       if (bf && (skb_headlen(skb) <= sq->max_inline))
+               return skb_headlen(skb);
+
        return MLX5E_MIN_INLINE;
 }
 
@@ -129,6 +137,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
        u8  opcode = MLX5_OPCODE_SEND;
        dma_addr_t dma_addr = 0;
+       bool bf = false;
        u16 headlen;
        u16 ds_cnt;
        u16 ihs;
@@ -141,6 +150,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        else
                sq->stats.csum_offload_none++;
 
+       if (sq->cc != sq->prev_cc) {
+               sq->prev_cc = sq->cc;
+               sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+       }
+
        if (skb_is_gso(skb)) {
                u32 payload_len;
 
@@ -153,7 +167,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                sq->stats.tso_packets++;
                sq->stats.tso_bytes += payload_len;
        } else {
-               ihs = mlx5e_get_inline_hdr_size(sq, skb);
+               bf = sq->bf_budget &&
+                    !skb->xmit_more &&
+                    !skb_shinfo(skb)->nr_frags;
+               ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
                MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
                                                        ETH_ZLEN);
        }
@@ -225,14 +242,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        }
 
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+               int bf_sz = 0;
+
+               if (bf && sq->uar_bf_map)
+                       bf_sz = MLX5E_TX_SKB_CB(skb)->num_wqebbs << 3;
+
                cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-               mlx5e_tx_notify_hw(sq, wqe);
+               mlx5e_tx_notify_hw(sq, wqe, bf_sz);
        }
 
        /* fill sq edge with nops to avoid wqe wrap around */
        while ((sq->pc & wq->sz_m1) > sq->edge)
                mlx5e_send_nop(sq, false);
 
+       sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+
        sq->stats.packets++;
        return NETDEV_TX_OK;
 
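For orientation, the blue flame (BF) fast path added to this file boils down to the condensed sketch below. The helper name mlx5e_may_use_bf() is hypothetical (mlx5e_sq_xmit() open-codes this test), and the fields it reads (bf_budget, the xmit_more flag, the frag count) are exactly the ones used by the hunks above.

/* Hypothetical sketch, not part of the patch: the BF eligibility test.
 * BF doorbell writes are only attempted when the SQ recently drained
 * (bf_budget > 0), the doorbell will be rung for this packet (!xmit_more)
 * and the packet is linear, so its whole headlen can be inlined into the
 * WQE (see mlx5e_get_inline_hdr_size() above).
 */
static bool mlx5e_may_use_bf(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	return sq->bf_budget &&
	       !skb->xmit_more &&
	       !skb_shinfo(skb)->nr_frags;
}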
index afad529838de748efc9f9253c6fde42abbe954a3..603a8b0908eea74a39bb88d9736d200d8acc3573 100644 (file)
@@ -455,7 +455,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-       int numa_node           = dev_to_node(&mdev->pdev->dev);
+       int numa_node           = priv->numa_node;
        int err;
 
        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -654,6 +654,22 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
 }
 #endif
 
+static int map_bf_area(struct mlx5_core_dev *dev)
+{
+       resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
+       resource_size_t bf_len = pci_resource_len(dev->pdev, 0);
+
+       dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);
+
+       return dev->priv.bf_mapping ? 0 : -ENOMEM;
+}
+
+static void unmap_bf_area(struct mlx5_core_dev *dev)
+{
+       if (dev->priv.bf_mapping)
+               io_mapping_free(dev->priv.bf_mapping);
+}
+
 static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
@@ -668,6 +684,10 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);
 
+       mutex_init(&priv->alloc_mutex);
+
+       priv->numa_node = dev_to_node(&dev->pdev->dev);
+
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
        if (!priv->dbg_root)
                return -ENOMEM;
@@ -804,10 +824,13 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
                goto err_stop_eqs;
        }
 
+       if (map_bf_area(dev))
+               dev_err(&pdev->dev, "Failed to map blue flame area\n");
+
        err = mlx5_irq_set_affinity_hints(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-               goto err_free_comp_eqs;
+               goto err_unmap_bf_area;
        }
 
        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
@@ -819,7 +842,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 
        return 0;
 
-err_free_comp_eqs:
+err_unmap_bf_area:
+       unmap_bf_area(dev);
+
        free_comp_eqs(dev);
 
 err_stop_eqs:
@@ -877,6 +902,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
+       unmap_bf_area(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
index fc88ecaecb4b4307f2d5c796cc91c548e856f845..566a70488db12ddef46f5623cb77c3e1e4c4ab2c 100644 (file)
@@ -73,7 +73,12 @@ static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
                                             int in_size, u32 *out,
                                             int out_size)
 {
-       mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       int err;
+
+       err = mlx5_cmd_exec(dev, in, in_size, out, out_size);
+       if (err)
+               return err;
+
        return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
 }
 
index 8d98b03026d5db588eee7de23f04611f268b4543..c4f3f74908ec220254137ea7893cd373c6c3b507 100644 (file)
@@ -358,3 +358,32 @@ int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
        return  mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
                                           sizeof(out));
 }
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rqtn)
+{
+       u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+       int err;
+
+       MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+       memset(out, 0, sizeof(out));
+       err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+       if (!err)
+               *rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+       return err;
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+       u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+       memset(in, 0, sizeof(in));
+
+       MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+       MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
index f9ef244710d534b5e22bddd313d1497b36da12fa..10bd75e7d9b1dd3f44fab37bc274bbe219040677 100644 (file)
@@ -61,4 +61,8 @@ int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
 int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
 int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
 
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+                        u32 *rqtn);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
 #endif /* __TRANSOBJ_H__ */
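A rough, hypothetical usage sketch of the two RQT helpers declared above (not taken from this patch); the create_rqt_in mailbox layout (rqt_ctx and its RQ number list) is defined by the device interface headers and is deliberately elided here.

/* Hypothetical caller: create an RQT from a prepared mailbox, use it,
 * then destroy it.  'in'/'inlen' are assumed to have been sized and
 * filled by the caller according to the create_rqt_in layout. */
static int example_rqt_lifecycle(struct mlx5_core_dev *mdev, u32 *in, int inlen)
{
	u32 rqtn;
	int err;

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqtn);
	if (err)
		return err;

	/* ... reference the RQT, e.g. from a TIR context ... */

	mlx5_core_destroy_rqt(mdev, rqtn);
	return 0;
}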
index 9ef85873ceea8203655c8e63cdc4601d15088157..eb05c845ece9247e7e54fee5880ad27be7be7253 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/io-mapping.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
@@ -246,6 +247,10 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
                goto err_free_uar;
        }
 
+       if (mdev->priv.bf_mapping)
+               uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping,
+                                               uar->index << PAGE_SHIFT);
+
        return 0;
 
 err_free_uar:
@@ -257,6 +262,7 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
+       io_mapping_unmap(uar->bf_map);
        iounmap(uar->map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
index 8388411582cf80cfc1e0ab9b8ac48491c2a68f97..ce21ee5b23577ee63e8b5679a1fe2204c3ccd895 100644 (file)
@@ -73,13 +73,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
        wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
                goto err_db_free;
@@ -108,13 +109,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
        wq->sz_m1 = (1 << wq->log_sz) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
        }
 
-       err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+       err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+                                 &wq_ctrl->buf, param->buf_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
                goto err_db_free;
@@ -144,7 +146,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
        wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
        wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
 
-       err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+       err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
        if (err) {
                mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
                return err;
index e0ddd69fb429ff5a2ded06572f43949f2ec67945..6c2a8f95093c6b0ea1212ac8ae9b1e29f421dad0 100644 (file)
@@ -37,7 +37,8 @@
 
 struct mlx5_wq_param {
        int             linear;
-       int             numa;
+       int             buf_numa_node;
+       int             db_numa_node;
 };
 
 struct mlx5_wq_ctrl {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
new file mode 100644 (file)
index 0000000..8d1080d
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Mellanox switch drivers configuration
+#
+
+config MLXSW_CORE
+       tristate "Mellanox Technologies Switch ASICs support"
+       ---help---
+         This driver supports Mellanox Technologies Switch ASICs family.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_core.
+
+config MLXSW_PCI
+       tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
+       depends on PCI && MLXSW_CORE
+       default m
+       ---help---
+         This is the PCI bus implementation for Mellanox Technologies Switch ASICs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_pci.
+
+config MLXSW_SWITCHX2
+       tristate "Mellanox Technologies SwitchX-2 support"
+       depends on MLXSW_CORE && NET_SWITCHDEV
+       default m
+       ---help---
+         This driver supports Mellanox Technologies SwitchX-2 Ethernet
+         Switch ASICs.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mlxsw_switchx2.
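For reference, a kernel configuration fragment that builds all three new modules (matching the symbols and "default m" choices above, and assuming PCI and NET_SWITCHDEV are already enabled) would be:

CONFIG_MLXSW_CORE=m
CONFIG_MLXSW_PCI=m
CONFIG_MLXSW_SWITCHX2=m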
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
new file mode 100644 (file)
index 0000000..0a05f65
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_MLXSW_CORE)       += mlxsw_core.o
+mlxsw_core-objs                        := core.o
+obj-$(CONFIG_MLXSW_PCI)                += mlxsw_pci.o
+mlxsw_pci-objs                 := pci.o
+obj-$(CONFIG_MLXSW_SWITCHX2)   += mlxsw_switchx2.o
+mlxsw_switchx2-objs            := switchx2.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
new file mode 100644 (file)
index 0000000..770db17
--- /dev/null
@@ -0,0 +1,1090 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/cmd.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CMD_H
+#define _MLXSW_CMD_H
+
+#include "item.h"
+
+#define MLXSW_CMD_MBOX_SIZE    4096
+
+static inline char *mlxsw_cmd_mbox_alloc(void)
+{
+       return kzalloc(MLXSW_CMD_MBOX_SIZE, GFP_KERNEL);
+}
+
+static inline void mlxsw_cmd_mbox_free(char *mbox)
+{
+       kfree(mbox);
+}
+
+static inline void mlxsw_cmd_mbox_zero(char *mbox)
+{
+       memset(mbox, 0, MLXSW_CMD_MBOX_SIZE);
+}
+
+struct mlxsw_core;
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+                  u32 in_mod, bool out_mbox_direct,
+                  char *in_mbox, size_t in_mbox_size,
+                  char *out_mbox, size_t out_mbox_size);
+
+static inline int mlxsw_cmd_exec_in(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                   u8 opcode_mod, u32 in_mod, char *in_mbox,
+                                   size_t in_mbox_size)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+                             in_mbox, in_mbox_size, NULL, 0);
+}
+
+static inline int mlxsw_cmd_exec_out(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                    u8 opcode_mod, u32 in_mod,
+                                    bool out_mbox_direct,
+                                    char *out_mbox, size_t out_mbox_size)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod,
+                             out_mbox_direct, NULL, 0,
+                             out_mbox, out_mbox_size);
+}
+
+static inline int mlxsw_cmd_exec_none(struct mlxsw_core *mlxsw_core, u16 opcode,
+                                     u8 opcode_mod, u32 in_mod)
+{
+       return mlxsw_cmd_exec(mlxsw_core, opcode, opcode_mod, in_mod, false,
+                             NULL, 0, NULL, 0);
+}
+
+enum mlxsw_cmd_opcode {
+       MLXSW_CMD_OPCODE_QUERY_FW               = 0x004,
+       MLXSW_CMD_OPCODE_QUERY_BOARDINFO        = 0x006,
+       MLXSW_CMD_OPCODE_QUERY_AQ_CAP           = 0x003,
+       MLXSW_CMD_OPCODE_MAP_FA                 = 0xFFF,
+       MLXSW_CMD_OPCODE_UNMAP_FA               = 0xFFE,
+       MLXSW_CMD_OPCODE_CONFIG_PROFILE         = 0x100,
+       MLXSW_CMD_OPCODE_ACCESS_REG             = 0x040,
+       MLXSW_CMD_OPCODE_SW2HW_DQ               = 0x201,
+       MLXSW_CMD_OPCODE_HW2SW_DQ               = 0x202,
+       MLXSW_CMD_OPCODE_2ERR_DQ                = 0x01E,
+       MLXSW_CMD_OPCODE_QUERY_DQ               = 0x022,
+       MLXSW_CMD_OPCODE_SW2HW_CQ               = 0x016,
+       MLXSW_CMD_OPCODE_HW2SW_CQ               = 0x017,
+       MLXSW_CMD_OPCODE_QUERY_CQ               = 0x018,
+       MLXSW_CMD_OPCODE_SW2HW_EQ               = 0x013,
+       MLXSW_CMD_OPCODE_HW2SW_EQ               = 0x014,
+       MLXSW_CMD_OPCODE_QUERY_EQ               = 0x015,
+};
+
+static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
+{
+       switch (opcode) {
+       case MLXSW_CMD_OPCODE_QUERY_FW:
+               return "QUERY_FW";
+       case MLXSW_CMD_OPCODE_QUERY_BOARDINFO:
+               return "QUERY_BOARDINFO";
+       case MLXSW_CMD_OPCODE_QUERY_AQ_CAP:
+               return "QUERY_AQ_CAP";
+       case MLXSW_CMD_OPCODE_MAP_FA:
+               return "MAP_FA";
+       case MLXSW_CMD_OPCODE_UNMAP_FA:
+               return "UNMAP_FA";
+       case MLXSW_CMD_OPCODE_CONFIG_PROFILE:
+               return "CONFIG_PROFILE";
+       case MLXSW_CMD_OPCODE_ACCESS_REG:
+               return "ACCESS_REG";
+       case MLXSW_CMD_OPCODE_SW2HW_DQ:
+               return "SW2HW_DQ";
+       case MLXSW_CMD_OPCODE_HW2SW_DQ:
+               return "HW2SW_DQ";
+       case MLXSW_CMD_OPCODE_2ERR_DQ:
+               return "2ERR_DQ";
+       case MLXSW_CMD_OPCODE_QUERY_DQ:
+               return "QUERY_DQ";
+       case MLXSW_CMD_OPCODE_SW2HW_CQ:
+               return "SW2HW_CQ";
+       case MLXSW_CMD_OPCODE_HW2SW_CQ:
+               return "HW2SW_CQ";
+       case MLXSW_CMD_OPCODE_QUERY_CQ:
+               return "QUERY_CQ";
+       case MLXSW_CMD_OPCODE_SW2HW_EQ:
+               return "SW2HW_EQ";
+       case MLXSW_CMD_OPCODE_HW2SW_EQ:
+               return "HW2SW_EQ";
+       case MLXSW_CMD_OPCODE_QUERY_EQ:
+               return "QUERY_EQ";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+enum mlxsw_cmd_status {
+       /* Command execution succeeded. */
+       MLXSW_CMD_STATUS_OK             = 0x00,
+       /* Internal error (e.g. bus error) occurred while processing command. */
+       MLXSW_CMD_STATUS_INTERNAL_ERR   = 0x01,
+       /* Operation/command not supported or opcode modifier not supported. */
+       MLXSW_CMD_STATUS_BAD_OP         = 0x02,
+       /* Parameter not supported, parameter out of range. */
+       MLXSW_CMD_STATUS_BAD_PARAM      = 0x03,
+       /* System was not enabled or bad system state. */
+       MLXSW_CMD_STATUS_BAD_SYS_STATE  = 0x04,
+       /* Attempt to access reserved or unallocated resource, or resource in
+        * inappropriate ownership.
+        */
+       MLXSW_CMD_STATUS_BAD_RESOURCE   = 0x05,
+       /* Requested resource is currently executing a command. */
+       MLXSW_CMD_STATUS_RESOURCE_BUSY  = 0x06,
+       /* Required capability exceeds device limits. */
+       MLXSW_CMD_STATUS_EXCEED_LIM     = 0x08,
+       /* Resource is not in the appropriate state or ownership. */
+       MLXSW_CMD_STATUS_BAD_RES_STATE  = 0x09,
+       /* Index out of range (might be beyond table size or attempt to
+        * access a reserved resource).
+        */
+       MLXSW_CMD_STATUS_BAD_INDEX      = 0x0A,
+       /* NVMEM checksum/CRC failed. */
+       MLXSW_CMD_STATUS_BAD_NVMEM      = 0x0B,
+       /* Bad management packet (silently discarded). */
+       MLXSW_CMD_STATUS_BAD_PKT        = 0x30,
+};
+
+static inline const char *mlxsw_cmd_status_str(u8 status)
+{
+       switch (status) {
+       case MLXSW_CMD_STATUS_OK:
+               return "OK";
+       case MLXSW_CMD_STATUS_INTERNAL_ERR:
+               return "INTERNAL_ERR";
+       case MLXSW_CMD_STATUS_BAD_OP:
+               return "BAD_OP";
+       case MLXSW_CMD_STATUS_BAD_PARAM:
+               return "BAD_PARAM";
+       case MLXSW_CMD_STATUS_BAD_SYS_STATE:
+               return "BAD_SYS_STATE";
+       case MLXSW_CMD_STATUS_BAD_RESOURCE:
+               return "BAD_RESOURCE";
+       case MLXSW_CMD_STATUS_RESOURCE_BUSY:
+               return "RESOURCE_BUSY";
+       case MLXSW_CMD_STATUS_EXCEED_LIM:
+               return "EXCEED_LIM";
+       case MLXSW_CMD_STATUS_BAD_RES_STATE:
+               return "BAD_RES_STATE";
+       case MLXSW_CMD_STATUS_BAD_INDEX:
+               return "BAD_INDEX";
+       case MLXSW_CMD_STATUS_BAD_NVMEM:
+               return "BAD_NVMEM";
+       case MLXSW_CMD_STATUS_BAD_PKT:
+               return "BAD_PKT";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+/* QUERY_FW - Query Firmware
+ * -------------------------
+ * OpMod == 0, INMmod == 0
+ * -----------------------
+ * The QUERY_FW command retrieves information related to firmware, command
+ * interface version and the amount of resources that should be allocated to
+ * the firmware.
+ */
+
+static inline int mlxsw_cmd_query_fw(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_FW,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_fw_fw_pages
+ * Amount of physical memory to be allocated for firmware usage in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_pages, 0x00, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_major
+ * Firmware Revision - Major
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_major, 0x00, 0, 16);
+
+/* cmd_mbox_query_fw_fw_rev_subminor
+ * Firmware Sub-minor version (Patch level)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_subminor, 0x04, 16, 16);
+
+/* cmd_mbox_query_fw_fw_rev_minor
+ * Firmware Revision - Minor
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_rev_minor, 0x04, 0, 16);
+
+/* cmd_mbox_query_fw_core_clk
+ * Internal Clock Frequency (in MHz)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, core_clk, 0x08, 16, 16);
+
+/* cmd_mbox_query_fw_cmd_interface_rev
+ * Command Interface Interpreter Revision ID. This number is bumped up
+ * every time a non-backward-compatible change is made to the command
+ * interface. The current cmd_interface_rev is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, cmd_interface_rev, 0x08, 0, 16);
+
+/* cmd_mbox_query_fw_dt
+ * If set, Debug Trace is supported
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, dt, 0x0C, 31, 1);
+
+/* cmd_mbox_query_fw_api_version
+ * Indicates the version of the API, to enable software querying
+ * for compatibility. The current api_version is 1.
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, api_version, 0x0C, 0, 16);
+
+/* cmd_mbox_query_fw_fw_hour
+ * Firmware timestamp - hour
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_hour, 0x10, 24, 8);
+
+/* cmd_mbox_query_fw_fw_minutes
+ * Firmware timestamp - minutes
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_minutes, 0x10, 16, 8);
+
+/* cmd_mbox_query_fw_fw_seconds
+ * Firmware timestamp - seconds
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_seconds, 0x10, 8, 8);
+
+/* cmd_mbox_query_fw_fw_year
+ * Firmware timestamp - year
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_year, 0x14, 16, 16);
+
+/* cmd_mbox_query_fw_fw_month
+ * Firmware timestamp - month
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
+
+/* cmd_mbox_query_fw_fw_day
+ * Firmware timestamp - day
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+
+/* cmd_mbox_query_fw_clr_int_base_offset
+ * Clear Interrupt register's offset from clr_int_bar register
+ * in PCI address space.
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, clr_int_base_offset, 0x20, 0, 64);
+
+/* cmd_mbox_query_fw_clr_int_bar
+ * PCI base address register (BAR) where clr_int register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, clr_int_bar, 0x28, 30, 2);
+
+/* cmd_mbox_query_fw_error_buf_offset
+ * Read-only buffer for internal error reports (offset
+ * from the error_buf_bar register in PCI address space).
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, error_buf_offset, 0x30, 0, 64);
+
+/* cmd_mbox_query_fw_error_buf_size
+ * Internal error buffer size in DWORDs
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_buf_size, 0x38, 0, 32);
+
+/* cmd_mbox_query_fw_error_int_bar
+ * PCI base address register (BAR) where error buffer
+ * register is located.
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, error_int_bar, 0x3C, 30, 2);
+
+/* cmd_mbox_query_fw_doorbell_page_offset
+ * Offset of the doorbell page
+ */
+MLXSW_ITEM64(cmd_mbox, query_fw, doorbell_page_offset, 0x40, 0, 64);
+
+/* cmd_mbox_query_fw_doorbell_page_bar
+ * PCI base address register (BAR) of the doorbell page
+ * 00 - BAR 0-1 (64 bit BAR)
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, doorbell_page_bar, 0x48, 30, 2);
+
+/* QUERY_BOARDINFO - Query Board Information
+ * -----------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_BOARDINFO command retrieves adapter specific parameters.
+ */
+
+static inline int mlxsw_cmd_boardinfo(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_BOARDINFO,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_boardinfo_intapin
+ * When PCIe interrupt messages are being used, this value is used for clearing
+ * an interrupt. When using MSI-X, this register is not used.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, intapin, 0x10, 24, 8);
+
+/* cmd_mbox_boardinfo_vsd_vendor_id
+ * PCISIG Vendor ID (www.pcisig.com/membership/vid_search) of the vendor
+ * specifying/formatting the VSD. The vsd_vendor_id identifies the management
+ * domain of the VSD/PSID data. Different vendors may choose different VSD/PSID
+ * format and encoding as long as they use their assigned vsd_vendor_id.
+ */
+MLXSW_ITEM32(cmd_mbox, boardinfo, vsd_vendor_id, 0x1C, 0, 16);
+
+/* cmd_mbox_boardinfo_vsd
+ * Vendor Specific Data. The VSD string that is burnt to the Flash
+ * with the firmware.
+ */
+#define MLXSW_CMD_BOARDINFO_VSD_LEN 208
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, vsd, 0x20, MLXSW_CMD_BOARDINFO_VSD_LEN);
+
+/* cmd_mbox_boardinfo_psid
+ * The PSID field is a 16-byte ASCII character string which acts as
+ * the board ID. The PSID format is used in conjunction with
+ * Mellanox vsd_vendor_id (15B3h).
+ */
+#define MLXSW_CMD_BOARDINFO_PSID_LEN 16
+MLXSW_ITEM_BUF(cmd_mbox, boardinfo, psid, 0xF0, MLXSW_CMD_BOARDINFO_PSID_LEN);
+
+/* QUERY_AQ_CAP - Query Asynchronous Queues Capabilities
+ * -----------------------------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The QUERY_AQ_CAP command returns the device asynchronous queues
+ * capabilities supported.
+ */
+
+static inline int mlxsw_cmd_query_aq_cap(struct mlxsw_core *mlxsw_core,
+                                        char *out_mbox)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_AQ_CAP,
+                                 0, 0, false, out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_aq_cap_log_max_sdq_sz
+ * Log (base 2) of max WQEs allowed on SDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_sdq_sz, 0x00, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_sdqs
+ * Maximum number of SDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_sdqs, 0x00, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_rdq_sz
+ * Log (base 2) of max WQEs allowed on RDQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_rdqs
+ * Maximum number of RDQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_cq_sz
+ * Log (base 2) of max CQEs allowed on CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_cqs
+ * Maximum number of CQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_cqs, 0x08, 0, 8);
+
+/* cmd_mbox_query_aq_cap_log_max_eq_sz
+ * Log (base 2) of max EQEs allowed on EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_eq_sz, 0x0C, 24, 8);
+
+/* cmd_mbox_query_aq_cap_max_num_eqs
+ * Maximum number of EQs.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_eqs, 0x0C, 0, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_sq
+ * The maximum S/G list elements in an SDQ. An SDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_sq, 0x10, 8, 8);
+
+/* cmd_mbox_query_aq_cap_max_sg_rq
+ * The maximum S/G list elements in an RDQ. An RDQ must not contain
+ * more S/G entries than indicated here.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
+
+/* MAP_FA - Map Firmware Area
+ * --------------------------
+ * OpMod == 0 (N/A), INMmod == Number of VPM entries
+ * -------------------------------------------------
+ * The MAP_FA command passes physical pages to the switch. These pages
+ * are used to store the device firmware. MAP_FA can be executed multiple
+ * times until all the firmware area is mapped (the size that should be
+ * mapped is retrieved through the QUERY_FW command). All required pages
+ * must be mapped to finish the initialization phase. Physical memory
+ * passed in this command must be pinned.
+ */
+
+static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
+                                  char *in_mbox, u32 vpm_entries_count)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_MAP_FA,
+                                0, vpm_entries_count,
+                                in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_map_fa_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, map_fa, pa, 0x00, 12, 52, 0x08, 0x00, true);
+
+/* cmd_mbox_map_fa_log2size
+ * Log (base 2) of the size in 4KB pages of the physical and contiguous memory
+ * that starts at PA_L/H.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, map_fa, log2size, 0x00, 0, 5, 0x08, 0x04, false);
+
+/* UNMAP_FA - Unmap Firmware Area
+ * ------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The UNMAP_FA command unloads the firmware and unmaps all of the
+ * firmware area. After this command completes, the device will not access
+ * the pages that were mapped to the firmware area. After executing the UNMAP_FA
+ * command, a software reset must be done prior to executing the MAP_FA command.
+ */
+
+static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
+}
+
+/* CONFIG_PROFILE (Set) - Configure Switch Profile
+ * ------------------------------
+ * OpMod == 1 (Set), INMmod == 0 (N/A)
+ * -----------------------------------
+ * The CONFIG_PROFILE command sets the switch profile. The command can be
+ * executed on the device only once at startup in order to allocate and
+ * configure all switch resources and prepare it for operational mode.
+ * It is not possible to change the device profile after the chip is
+ * in operational mode.
+ * Failure of the CONFIG_PROFILE command leaves the hardware in an indeterminate
+ * state; therefore, a software reset of the device is required following an
+ * unsuccessful completion of the command. A software reset of the device is
+ * also required in order to change an existing profile.
+ */
+
+static inline int mlxsw_cmd_config_profile_set(struct mlxsw_core *mlxsw_core,
+                                              char *in_mbox)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_CONFIG_PROFILE,
+                                1, 0, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_config_profile_set_max_vepa_channels
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vepa_channels, 0x0C, 0, 1);
+
+/* cmd_mbox_config_profile_set_max_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_lag, 0x0C, 1, 1);
+
+/* cmd_mbox_config_profile_set_max_port_per_lag
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_port_per_lag, 0x0C, 2, 1);
+
+/* cmd_mbox_config_profile_set_max_mid
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_mid, 0x0C, 3, 1);
+
+/* cmd_mbox_config_profile_set_max_pgt
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pgt, 0x0C, 4, 1);
+
+/* cmd_mbox_config_profile_set_max_system_port
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_system_port, 0x0C, 5, 1);
+
+/* cmd_mbox_config_profile_set_max_vlan_groups
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
+
+/* cmd_mbox_config_profile_set_max_regions
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
+
+/* cmd_mbox_config_profile_set_flood_mode
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_mode, 0x0C, 8, 1);
+
+/* cmd_mbox_config_profile_set_flood_tables
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_flood_tables, 0x0C, 9, 1);
+
+/* cmd_mbox_config_profile_set_max_ib_mc
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_ib_mc, 0x0C, 12, 1);
+
+/* cmd_mbox_config_profile_set_max_pkey
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_max_pkey, 0x0C, 13, 1);
+
+/* cmd_mbox_config_profile_set_adaptive_routing_group_cap
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile,
+            set_adaptive_routing_group_cap, 0x0C, 14, 1);
+
+/* cmd_mbox_config_profile_set_ar_sec
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+
+/* cmd_mbox_config_profile_max_vepa_channels
+ * Maximum number of VEPA channels per port (0 through 16)
+ * 0 - multi-channel VEPA is disabled
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vepa_channels, 0x10, 0, 8);
+
+/* cmd_mbox_config_profile_max_lag
+ * Maximum number of LAG IDs requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_lag, 0x14, 0, 16);
+
+/* cmd_mbox_config_profile_max_port_per_lag
+ * Maximum number of ports per LAG requested.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_port_per_lag, 0x18, 0, 16);
+
+/* cmd_mbox_config_profile_max_mid
+ * Maximum Multicast IDs.
+ * Multicast IDs are allocated from 0 to max_mid-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_mid, 0x1C, 0, 16);
+
+/* cmd_mbox_config_profile_max_pgt
+ * Maximum records in the Port Group Table per Switch Partition.
+ * Port Group Table indexes are from 0 to max_pgt-1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pgt, 0x20, 0, 16);
+
+/* cmd_mbox_config_profile_max_system_port
+ * The maximum number of system ports that can be allocated.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_system_port, 0x24, 0, 16);
+
+/* cmd_mbox_config_profile_max_vlan_groups
+ * Maximum number of VLAN Groups for VLAN binding.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
+
+/* cmd_mbox_config_profile_max_regions
+ * Maximum number of TCAM Regions.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
+
+/* cmd_mbox_config_profile_max_flood_tables
+ * Maximum number of Flooding Tables. Flooding Tables are associated to
+ * the different packet types for the different switch partitions.
+ * Note that the table size depends on the fid_based mode.
+ * In SwitchX silicon, tables are split equally between the switch
+ * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
+ * with swid-1 and the last 4 are associated with swid-2.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
+
+/* cmd_mbox_config_profile_max_vid_flood_tables
+ * Maximum number of per-vid flooding tables. Flooding tables are associated
+ * to the different packet types for the different switch partitions.
+ * Table size is 4K entries covering all VID space.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
+
+/* cmd_mbox_config_profile_fid_based
+ * FID Based Flood Mode
+ * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
+ * 01 Use FID to offset the index to the Port Group Table (pgi)
+ * 10 Use FID to offset the index to the Port Group Table (pgi) and
+ * the Multicast ID
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
+
+/* cmd_mbox_config_profile_max_ib_mc
+ * Maximum number of multicast FDB records for InfiniBand
+ * FDB (in 512 chunks) per InfiniBand switch partition.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_ib_mc, 0x40, 0, 15);
+
+/* cmd_mbox_config_profile_max_pkey
+ * Maximum per port PKEY table size (for PKEY enforcement)
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, max_pkey, 0x44, 0, 15);
+
+/* cmd_mbox_config_profile_ar_sec
+ * Primary/secondary capability
+ * Describes the number of adaptive routing sub-groups
+ * 0 - disable primary/secondary (single group)
+ * 1 - enable primary/secondary (2 sub-groups)
+ * 2 - 3 sub-groups: Not supported in SwitchX, SwitchX-2
+ * 3 - 4 sub-groups: Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, ar_sec, 0x4C, 24, 2);
+
+/* cmd_mbox_config_profile_adaptive_routing_group_cap
+ * Adaptive Routing Group Capability. Indicates the number of AR groups
+ * supported. Note that when Primary/secondary is enabled, each
+ * primary/secondary couple consumes 2 adaptive routing entries.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
+
+/* cmd_mbox_config_profile_arn
+ * Adaptive Routing Notification Enable
+ * Not supported in SwitchX, SwitchX-2
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+
+/* cmd_mbox_config_profile_swid_config_mask
+ * Modify Switch Partition Configuration mask. When set, the configuration
+ * values for the Switch Partition are taken from the mailbox.
+ * When clear, the current configuration values are used.
+ * Bit 0 - set type
+ * Bit 1 - properties
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_mask,
+                    0x60, 24, 8, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_type
+ * Switch Partition type.
+ * 0000 - disabled (Switch Partition does not exist)
+ * 0001 - InfiniBand
+ * 0010 - Ethernet
+ * 1000 - router port (SwitchX-2 only)
+ * Other - reserved
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
+                    0x60, 20, 4, 0x08, 0x00, false);
+
+/* cmd_mbox_config_profile_swid_config_properties
+ * Switch Partition properties.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
+                    0x60, 0, 8, 0x08, 0x00, false);
+
+/* ACCESS_REG - Access EMAD Supported Register
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == 0 (N/A)
+ * -------------------------------------
+ * The ACCESS_REG command supports accessing device registers. This access
+ * is mainly used for bootstrapping.
+ */
+
+static inline int mlxsw_cmd_access_reg(struct mlxsw_core *mlxsw_core,
+                                      char *in_mbox, char *out_mbox)
+{
+       return mlxsw_cmd_exec(mlxsw_core, MLXSW_CMD_OPCODE_ACCESS_REG,
+                             0, 0, false, in_mbox, MLXSW_CMD_MBOX_SIZE,
+                             out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_DQ - Software to Hardware DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The SW2HW_DQ command transitions a descriptor queue from software to
+ * hardware ownership. The command enables posting WQEs and ringing DoorBells
+ * on the descriptor queue.
+ */
+
+static inline int __mlxsw_cmd_sw2hw_dq(struct mlxsw_core *mlxsw_core,
+                                      char *in_mbox, u32 dq_number,
+                                      u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_DQ,
+                                opcode_mod, dq_number,
+                                in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+enum {
+       MLXSW_CMD_OPCODE_MOD_SDQ = 0,
+       MLXSW_CMD_OPCODE_MOD_RDQ = 1,
+};
+
+static inline int mlxsw_cmd_sw2hw_sdq(struct mlxsw_core *mlxsw_core,
+                                     char *in_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_sw2hw_rdq(struct mlxsw_core *mlxsw_core,
+                                     char *in_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_sw2hw_dq(mlxsw_core, in_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* cmd_mbox_sw2hw_dq_cq
+ * Number of the CQ that this Descriptor Queue reports completions to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, cq, 0x00, 24, 8);
+
+/* cmd_mbox_sw2hw_dq_sdq_tclass
+ * SDQ: CPU Egress TClass
+ * RDQ: Reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, sdq_tclass, 0x00, 16, 6);
+
+/* cmd_mbox_sw2hw_dq_log2_dq_sz
+ * Log (base 2) of the Descriptor Queue size in 4KB pages.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_dq, log2_dq_sz, 0x00, 0, 6);
+
+/* cmd_mbox_sw2hw_dq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_dq, pa, 0x10, 12, 52, 0x08, 0x00, true);
+
+/* HW2SW_DQ - Hardware to Software DQ
+ * ----------------------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The HW2SW_DQ command transitions a descriptor queue from hardware to
+ * software ownership. Incoming packets on the DQ are silently discarded;
+ * SW should not post descriptors on non-operational DQs.
+ */
+
+static inline int __mlxsw_cmd_hw2sw_dq(struct mlxsw_core *mlxsw_core,
+                                      u32 dq_number, u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_DQ,
+                                  opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_hw2sw_sdq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number)
+{
+       return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_hw2sw_rdq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number)
+{
+       return __mlxsw_cmd_hw2sw_dq(mlxsw_core, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* 2ERR_DQ - To Error DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The 2ERR_DQ command transitions the DQ into the error state from the state
+ * in which it has been. While the command is executed, some in-process
+ * descriptors may complete. Once the DQ transitions into the error state,
+ * if there are posted descriptors on the RDQ/SDQ, the hardware writes
+ * a completion with error (flushed) for all descriptors posted in the RDQ/SDQ.
+ * When the command is completed successfully, the DQ is already in
+ * the error state.
+ */
+
+static inline int __mlxsw_cmd_2err_dq(struct mlxsw_core *mlxsw_core,
+                                     u32 dq_number, u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_2ERR_DQ,
+                                  opcode_mod, dq_number);
+}
+
+static inline int mlxsw_cmd_2err_sdq(struct mlxsw_core *mlxsw_core,
+                                    u32 dq_number)
+{
+       return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+                                  MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_2err_rdq(struct mlxsw_core *mlxsw_core,
+                                    u32 dq_number)
+{
+       return __mlxsw_cmd_2err_dq(mlxsw_core, dq_number,
+                                  MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* QUERY_DQ - Query DQ
+ * ---------------------
+ * OpMod == 0 (send DQ) / OpMod == 1 (receive DQ)
+ * INMmod == DQ number
+ * ----------------------------------------------
+ * The QUERY_DQ command retrieves a snapshot of DQ parameters from the hardware.
+ *
+ * Note: Output mailbox has the same format as SW2HW_DQ.
+ */
+
+static inline int __mlxsw_cmd_query_dq(struct mlxsw_core *mlxsw_core,
+                                      char *out_mbox, u32 dq_number,
+                                      u8 opcode_mod)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_DQ,
+                                 opcode_mod, dq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+static inline int mlxsw_cmd_query_sdq(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_SDQ);
+}
+
+static inline int mlxsw_cmd_query_rdq(struct mlxsw_core *mlxsw_core,
+                                     char *out_mbox, u32 dq_number)
+{
+       return __mlxsw_cmd_query_dq(mlxsw_core, out_mbox, dq_number,
+                                   MLXSW_CMD_OPCODE_MOD_RDQ);
+}
+
+/* SW2HW_CQ - Software to Hardware CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The SW2HW_CQ command transfers ownership of a CQ context entry from software
+ * to hardware. The command takes the CQ context entry from the input mailbox
+ * and stores it in the CQC in the ownership of the hardware. The command fails
+ * if the requested CQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
+                                    char *in_mbox, u32 cq_number)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_CQ,
+                                0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_cq_cv
+ * CQE Version.
+ * 0 - CQE Version 0, 1 - CQE Version 1
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+
+/* cmd_mbox_sw2hw_cq_c_eqn
+ * Event Queue this CQ reports completion events to.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, c_eqn, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_cq_oi
+ * When set, overrun ignore is enabled. In that case, CQ consumer counter
+ * update (poll for completion) and Request completion notification (Arm CQ)
+ * DoorBells should not be rung on that CQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_cq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, st, 0x00, 8, 1);
+
+/* cmd_mbox_sw2hw_cq_log_cq_size
+ * Log (base 2) of the CQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, log_cq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_cq_producer_counter
+ * Producer Counter. The counter is incremented for each CQE that is
+ * written by the HW to the CQ.
+ * Maintained by HW (valid for the QUERY_CQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_cq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_cq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_CQ - Hardware to Software CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The HW2SW_CQ command transfers ownership of a CQ context entry from hardware
+ * to software. The CQC entry is invalidated as a result of this command.
+ */
+
+static inline int mlxsw_cmd_hw2sw_cq(struct mlxsw_core *mlxsw_core,
+                                    u32 cq_number)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_CQ,
+                                  0, cq_number);
+}
+
+/* QUERY_CQ - Query CQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == CQ number
+ * -------------------------------------
+ * The QUERY_CQ command retrieves a snapshot of the current CQ context entry.
+ * The command stores the snapshot in the output mailbox in the software format.
+ * Note that the CQ context state and values are not affected by the QUERY_CQ
+ * command. The QUERY_CQ command is for debug purposes only.
+ *
+ * Note: Output mailbox has the same format as SW2HW_CQ.
+ */
+
+static inline int mlxsw_cmd_query_cq(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox, u32 cq_number)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_CQ,
+                                 0, cq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* SW2HW_EQ - Software to Hardware EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ * The SW2HW_EQ command transfers ownership of an EQ context entry from software
+ * to hardware. The command takes the EQ context entry from the input mailbox
+ * and stores it in the EQC in the ownership of the hardware. The command fails
+ * if the requested EQC entry is already in the ownership of the hardware.
+ */
+
+static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
+                                    char *in_mbox, u32 eq_number)
+{
+       return mlxsw_cmd_exec_in(mlxsw_core, MLXSW_CMD_OPCODE_SW2HW_EQ,
+                                0, eq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_sw2hw_eq_int_msix
+ * When set, MSI-X cycles will be generated by this EQ.
+ * When cleared, an interrupt will be generated by this EQ.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
+
+/* cmd_mbox_sw2hw_eq_oi
+ * When set, overrun ignore is enabled.
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
+
+/* cmd_mbox_sw2hw_eq_st
+ * Event delivery state machine
+ * 0x0 - FIRED
+ * 0x1 - ARMED (Request for Notification)
+ * 0x3 - Always ARMED
+ * other - reserved
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
+
+/* cmd_mbox_sw2hw_eq_log_eq_size
+ * Log (base 2) of the EQ size (in entries).
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
+
+/* cmd_mbox_sw2hw_eq_producer_counter
+ * Producer Counter. The counter is incremented for each EQE that is written
+ * by the HW to the EQ.
+ * Maintained by HW (valid for the QUERY_EQ command only)
+ */
+MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
+
+/* cmd_mbox_sw2hw_eq_pa
+ * Physical Address.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
+
+/* HW2SW_EQ - Hardware to Software EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ */
+
+static inline int mlxsw_cmd_hw2sw_eq(struct mlxsw_core *mlxsw_core,
+                                    u32 eq_number)
+{
+       return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_HW2SW_EQ,
+                                  0, eq_number);
+}
+
+/* QUERY_EQ - Query EQ
+ * ----------------------------------
+ * OpMod == 0 (N/A), INMmod == EQ number
+ * -------------------------------------
+ *
+ * Note: Output mailbox has the same format as SW2HW_EQ.
+ */
+
+static inline int mlxsw_cmd_query_eq(struct mlxsw_core *mlxsw_core,
+                                    char *out_mbox, u32 eq_number)
+{
+       return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_EQ,
+                                 0, eq_number, false,
+                                 out_mbox, MLXSW_CMD_MBOX_SIZE);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
new file mode 100644 (file)
index 0000000..ad66ae4
--- /dev/null
@@ -0,0 +1,1286 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/if_link.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/netdevice.h>
+#include <linux/wait.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/random.h>
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/rcupdate.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "item.h"
+#include "cmd.h"
+#include "port.h"
+#include "trap.h"
+#include "emad.h"
+#include "reg.h"
+
+static LIST_HEAD(mlxsw_core_driver_list);
+static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
+
+static const char mlxsw_core_driver_name[] = "mlxsw_core";
+
+static struct dentry *mlxsw_core_dbg_root;
+
+struct mlxsw_core_pcpu_stats {
+       u64                     trap_rx_packets[MLXSW_TRAP_ID_MAX];
+       u64                     trap_rx_bytes[MLXSW_TRAP_ID_MAX];
+       u64                     port_rx_packets[MLXSW_PORT_MAX_PORTS];
+       u64                     port_rx_bytes[MLXSW_PORT_MAX_PORTS];
+       struct u64_stats_sync   syncp;
+       u32                     trap_rx_dropped[MLXSW_TRAP_ID_MAX];
+       u32                     port_rx_dropped[MLXSW_PORT_MAX_PORTS];
+       u32                     trap_rx_invalid;
+       u32                     port_rx_invalid;
+};
+
+struct mlxsw_core {
+       struct mlxsw_driver *driver;
+       const struct mlxsw_bus *bus;
+       void *bus_priv;
+       const struct mlxsw_bus_info *bus_info;
+       struct list_head rx_listener_list;
+       struct list_head event_listener_list;
+       struct {
+               struct sk_buff *resp_skb;
+               u64 tid;
+               wait_queue_head_t wait;
+               bool trans_active;
+               struct mutex lock; /* One EMAD transaction at a time. */
+               bool use_emad;
+       } emad;
+       struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
+       struct dentry *dbg_dir;
+       struct {
+               struct debugfs_blob_wrapper vsd_blob;
+               struct debugfs_blob_wrapper psid_blob;
+       } dbg;
+       unsigned long driver_priv[0];
+       /* driver_priv must always be the last member */
+};
+
+struct mlxsw_rx_listener_item {
+       struct list_head list;
+       struct mlxsw_rx_listener rxl;
+       void *priv;
+};
+
+struct mlxsw_event_listener_item {
+       struct list_head list;
+       struct mlxsw_event_listener el;
+       void *priv;
+};
+
+/******************
+ * EMAD processing
+ ******************/
+
+/* emad_eth_hdr_dmac
+ * Destination MAC in EMAD's Ethernet header.
+ * Must be set to 01:02:c9:00:00:01
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
+
+/* emad_eth_hdr_smac
+ * Source MAC in EMAD's Ethernet header.
+ * Must be set to 00:02:c9:01:02:03
+ */
+MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
+
+/* emad_eth_hdr_ethertype
+ * Ethertype in EMAD's Ethernet header.
+ * Must be set to 0x8932
+ */
+MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
+
+/* emad_eth_hdr_mlx_proto
+ * Mellanox protocol.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
+
+/* emad_eth_hdr_ver
+ * Mellanox protocol version.
+ * Must be set to 0x0.
+ */
+MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
+
+/* emad_op_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x1 (operation TLV).
+ */
+MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
+
+/* emad_op_tlv_len
+ * Length of the operation TLV in u32.
+ * Must be set to 0x4.
+ */
+MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
+
+/* emad_op_tlv_dr
+ * Direct route bit. Setting to 1 indicates the EMAD is a direct route
+ * EMAD. DR TLV must follow.
+ *
+ * Note: Currently not supported and must not be set.
+ */
+MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
+
+/* emad_op_tlv_status
+ * Returned status in case of EMAD response. Must be set to 0 in case
+ * of EMAD request.
+ * 0x0 - success
+ * 0x1 - device is busy. Requester should retry
+ * 0x2 - Mellanox protocol version not supported
+ * 0x3 - unknown TLV
+ * 0x4 - register not supported
+ * 0x5 - operation class not supported
+ * 0x6 - EMAD method not supported
+ * 0x7 - bad parameter (e.g. port out of range)
+ * 0x8 - resource not available
+ * 0x9 - message receipt acknowledgment. Requester should retry
+ * 0x70 - internal error
+ */
+MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
+
+/* emad_op_tlv_register_id
+ * Register ID of register within register TLV.
+ */
+MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
+
+/* emad_op_tlv_r
+ * Response bit. Setting to 1 indicates Response, otherwise request.
+ */
+MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);
+
+/* emad_op_tlv_method
+ * EMAD method type.
+ * 0x1 - query
+ * 0x2 - write
+ * 0x3 - send (currently not supported)
+ * 0x4 - event
+ */
+MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
+
+/* emad_op_tlv_class
+ * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
+ */
+MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
+
+/* emad_op_tlv_tid
+ * EMAD transaction ID. Used for pairing request and response EMADs.
+ */
+MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
+
+/* emad_reg_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x3 (register TLV).
+ */
+MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
+
+/* emad_reg_tlv_len
+ * Length of the register TLV in u32.
+ */
+MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
+
+/* emad_end_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x0 (end TLV).
+ */
+MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
+
+/* emad_end_tlv_len
+ * Length of the end TLV in u32.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
+
+enum mlxsw_core_reg_access_type {
+       MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+       MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+};
+
+static inline const char *
+mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
+{
+       switch (type) {
+       case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
+               return "query";
+       case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
+               return "write";
+       }
+       BUG();
+}
+
+static void mlxsw_emad_pack_end_tlv(char *end_tlv)
+{
+       mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
+       mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
+}
+
+static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
+                                   const struct mlxsw_reg_info *reg,
+                                   char *payload)
+{
+       mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
+       mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
+       memcpy(reg_tlv + sizeof(u32), payload, reg->len);
+}
+
+static void mlxsw_emad_pack_op_tlv(char *op_tlv,
+                                  const struct mlxsw_reg_info *reg,
+                                  enum mlxsw_core_reg_access_type type,
+                                  struct mlxsw_core *mlxsw_core)
+{
+       mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
+       mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
+       mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
+       mlxsw_emad_op_tlv_status_set(op_tlv, 0);
+       mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
+       mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
+       if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
+               mlxsw_emad_op_tlv_method_set(op_tlv,
+                                            MLXSW_EMAD_OP_TLV_METHOD_QUERY);
+       else
+               mlxsw_emad_op_tlv_method_set(op_tlv,
+                                            MLXSW_EMAD_OP_TLV_METHOD_WRITE);
+       mlxsw_emad_op_tlv_class_set(op_tlv,
+                                   MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
+       mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+}
+
+static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
+{
+       char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
+
+       mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
+       mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
+       mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
+       mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
+       mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
+
+       skb_reset_mac_header(skb);
+
+       return 0;
+}
+
+static void mlxsw_emad_construct(struct sk_buff *skb,
+                                const struct mlxsw_reg_info *reg,
+                                char *payload,
+                                enum mlxsw_core_reg_access_type type,
+                                struct mlxsw_core *mlxsw_core)
+{
+       char *buf;
+
+       buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
+       mlxsw_emad_pack_end_tlv(buf);
+
+       buf = skb_push(skb, reg->len + sizeof(u32));
+       mlxsw_emad_pack_reg_tlv(buf, reg, payload);
+
+       buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
+       mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+
+       mlxsw_emad_construct_eth_hdr(skb);
+}
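+
+/* The EMAD frame built back to front above ends up laid out as:
+ *
+ *      Ethernet header            MLXSW_EMAD_ETH_HDR_LEN (16 bytes)
+ *      OP TLV                     MLXSW_EMAD_OP_TLV_LEN * 4 (16 bytes)
+ *      REG TLV header + payload   4 + reg->len bytes
+ *      END TLV                    MLXSW_EMAD_END_TLV_LEN * 4 (4 bytes)
+ */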
+
+static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
+{
+       return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
+}
+
+static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
+{
+       return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
+                                     MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
+}
+
+static char *mlxsw_emad_reg_payload(const char *op_tlv)
+{
+       return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
+}
+
+static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
+{
+       char *op_tlv;
+
+       op_tlv = mlxsw_emad_op_tlv(skb);
+       return mlxsw_emad_op_tlv_tid_get(op_tlv);
+}
+
+static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
+{
+       char *op_tlv;
+
+       op_tlv = mlxsw_emad_op_tlv(skb);
+       return mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE;
+}
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+                                struct sk_buff *skb,
+                                const struct mlxsw_tx_info *tx_info)
+{
+       int err;
+       int ret;
+
+       err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
+       if (err) {
+               dev_warn(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
+                        mlxsw_core->emad.tid);
+               dev_kfree_skb(skb);
+               return err;
+       }
+
+       mlxsw_core->emad.trans_active = true;
+       ret = wait_event_timeout(mlxsw_core->emad.wait,
+                                !(mlxsw_core->emad.trans_active),
+                                msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
+       if (!ret) {
+               dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
+                        mlxsw_core->emad.tid);
+               mlxsw_core->emad.trans_active = false;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
+                                    char *op_tlv)
+{
+       enum mlxsw_emad_op_tlv_status status;
+       u64 tid;
+
+       status = mlxsw_emad_op_tlv_status_get(op_tlv);
+       tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+
+       switch (status) {
+       case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+               return 0;
+       case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+       case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+               dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
+                        tid, status, mlxsw_emad_op_tlv_status_str(status));
+               return -EAGAIN;
+       case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+       case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+       case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+       case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+       case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+       default:
+               dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
+                       tid, status, mlxsw_emad_op_tlv_status_str(status));
+               return -EIO;
+       }
+}
+
+static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
+                                        struct sk_buff *skb)
+{
+       return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+}
+
+static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
+                              struct sk_buff *skb,
+                              const struct mlxsw_tx_info *tx_info)
+{
+       struct sk_buff *trans_skb;
+       int n_retry;
+       int err;
+
+       n_retry = 0;
+retry:
+       /* We copy the EMAD to a new skb, since we might need
+        * to retransmit it in case of failure.
+        */
+       trans_skb = skb_copy(skb, GFP_KERNEL);
+       if (!trans_skb) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
+       if (!err) {
+               struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+
+               err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
+               if (err)
+                       dev_kfree_skb(resp_skb);
+               if (err != -EAGAIN)
+                       goto out;
+       }
+       if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
+               goto retry;
+
+out:
+       dev_kfree_skb(skb);
+       mlxsw_core->emad.tid++;
+       return err;
+}
+
+static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
+                                       void *priv)
+{
+       struct mlxsw_core *mlxsw_core = priv;
+
+       if (mlxsw_emad_is_resp(skb) &&
+           mlxsw_core->emad.trans_active &&
+           mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
+               mlxsw_core->emad.resp_skb = skb;
+               mlxsw_core->emad.trans_active = false;
+               wake_up(&mlxsw_core->emad.wait);
+       } else {
+               dev_kfree_skb(skb);
+       }
+}
+
+static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
+       .func = mlxsw_emad_rx_listener_func,
+       .local_port = MLXSW_PORT_DONT_CARE,
+       .trap_id = MLXSW_TRAP_ID_ETHEMAD,
+};
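+
+/* mlxsw_emad_rx_listener_func() pairs with __mlxsw_emad_transmit():
+ * the transmitter sets emad.trans_active and sleeps on emad.wait,
+ * while the listener matches the response by transaction ID, stores
+ * the skb in emad.resp_skb, clears trans_active and wakes the
+ * transmitter. Since emad.lock serializes callers, at most one
+ * transaction is in flight at any time.
+ */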
+
+static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
+{
+       char htgt_pl[MLXSW_REG_HTGT_LEN];
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
+       err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+                           MLXSW_TRAP_ID_ETHEMAD);
+       return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+}
+
+static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
+{
+       int err;
+
+       /* Set the upper 32 bits of the transaction ID field to a random
+        * number. This allows us to discard EMADs addressed to other
+        * devices.
+        */
+       get_random_bytes(&mlxsw_core->emad.tid, 4);
+       mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+
+       init_waitqueue_head(&mlxsw_core->emad.wait);
+       mlxsw_core->emad.trans_active = false;
+       mutex_init(&mlxsw_core->emad.lock);
+
+       err = mlxsw_core_rx_listener_register(mlxsw_core,
+                                             &mlxsw_emad_rx_listener,
+                                             mlxsw_core);
+       if (err)
+               return err;
+
+       err = mlxsw_emad_traps_set(mlxsw_core);
+       if (err)
+               goto err_emad_trap_set;
+
+       mlxsw_core->emad.use_emad = true;
+
+       return 0;
+
+err_emad_trap_set:
+       mlxsw_core_rx_listener_unregister(mlxsw_core,
+                                         &mlxsw_emad_rx_listener,
+                                         mlxsw_core);
+       return err;
+}
+
+static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
+{
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+                           MLXSW_TRAP_ID_ETHEMAD);
+       mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
+
+       mlxsw_core_rx_listener_unregister(mlxsw_core,
+                                         &mlxsw_emad_rx_listener,
+                                         mlxsw_core);
+}
+
+static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
+                                       u16 reg_len)
+{
+       struct sk_buff *skb;
+       u16 emad_len;
+
+       emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
+                   (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
+                   sizeof(u32) + mlxsw_core->driver->txhdr_len);
+       if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
+               return NULL;
+
+       skb = netdev_alloc_skb(NULL, emad_len);
+       if (!skb)
+               return NULL;
+       memset(skb->data, 0, emad_len);
+       skb_reserve(skb, emad_len);
+
+       return skb;
+}
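+
+/* The skb is reserved in full because the EMAD is built back to front:
+ * mlxsw_emad_construct() pushes the END, REG and OP TLVs and the
+ * Ethernet header, and the bus driver's txhdr_construct() then pushes
+ * its TX header, which is why txhdr_len is part of the length budget
+ * above.
+ */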
+
+/*****************
+ * Core functions
+ *****************/
+
+static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_core *mlxsw_core = file->private;
+       struct mlxsw_core_pcpu_stats *p;
+       u64 rx_packets, rx_bytes;
+       u64 tmp_rx_packets, tmp_rx_bytes;
+       u32 rx_dropped, rx_invalid;
+       unsigned int start;
+       int i;
+       int j;
+       static const char hdr[] =
+               "     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";
+
+       seq_puts(file, hdr);
+       for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
+               rx_packets = 0;
+               rx_bytes = 0;
+               rx_dropped = 0;
+               for_each_possible_cpu(j) {
+                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+                       do {
+                               start = u64_stats_fetch_begin(&p->syncp);
+                               tmp_rx_packets = p->trap_rx_packets[i];
+                               tmp_rx_bytes = p->trap_rx_bytes[i];
+                       } while (u64_stats_fetch_retry(&p->syncp, start));
+
+                       rx_packets += tmp_rx_packets;
+                       rx_bytes += tmp_rx_bytes;
+                       rx_dropped += p->trap_rx_dropped[i];
+               }
+               seq_printf(file, "trap %3d %12llu %12llu %10u\n",
+                          i, rx_packets, rx_bytes, rx_dropped);
+       }
+       rx_invalid = 0;
+       for_each_possible_cpu(j) {
+               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+               rx_invalid += p->trap_rx_invalid;
+       }
+       seq_printf(file, "trap INV                           %10u\n",
+                  rx_invalid);
+
+       for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
+               rx_packets = 0;
+               rx_bytes = 0;
+               rx_dropped = 0;
+               for_each_possible_cpu(j) {
+                       p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+                       do {
+                               start = u64_stats_fetch_begin(&p->syncp);
+                               tmp_rx_packets = p->port_rx_packets[i];
+                               tmp_rx_bytes = p->port_rx_bytes[i];
+                       } while (u64_stats_fetch_retry(&p->syncp, start));
+
+                       rx_packets += tmp_rx_packets;
+                       rx_bytes += tmp_rx_bytes;
+                       rx_dropped += p->port_rx_dropped[i];
+               }
+               seq_printf(file, "port %3d %12llu %12llu %10u\n",
+                          i, rx_packets, rx_bytes, rx_dropped);
+       }
+       rx_invalid = 0;
+       for_each_possible_cpu(j) {
+               p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
+               rx_invalid += p->port_rx_invalid;
+       }
+       seq_printf(file, "port INV                           %10u\n",
+                  rx_invalid);
+       return 0;
+}
+
+static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
+{
+       struct mlxsw_core *mlxsw_core = inode->i_private;
+
+       return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
+}
+
+static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
+       .owner = THIS_MODULE,
+       .open = mlxsw_core_rx_stats_dbg_open,
+       .release = single_release,
+       .read = seq_read,
+       .llseek = seq_lseek
+};
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+                                   const char *buf, size_t size)
+{
+       __be32 *m = (__be32 *) buf;
+       int i;
+       int count = size / sizeof(__be32);
+
+       for (i = count - 1; i >= 0; i--)
+               if (m[i])
+                       break;
+       i++;
+       count = i ? i : 1;
+       for (i = 0; i < count; i += 4)
+               dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+                       i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+                       be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
+{
+       spin_lock(&mlxsw_core_driver_list_lock);
+       list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_register);
+
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
+{
+       spin_lock(&mlxsw_core_driver_list_lock);
+       list_del(&mlxsw_driver->list);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+}
+EXPORT_SYMBOL(mlxsw_core_driver_unregister);
+
+static struct mlxsw_driver *__driver_find(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
+               if (strcmp(mlxsw_driver->kind, kind) == 0)
+                       return mlxsw_driver;
+       }
+       return NULL;
+}
+
+static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       spin_lock(&mlxsw_core_driver_list_lock);
+       mlxsw_driver = __driver_find(kind);
+       if (!mlxsw_driver) {
+               spin_unlock(&mlxsw_core_driver_list_lock);
+               request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
+               spin_lock(&mlxsw_core_driver_list_lock);
+               mlxsw_driver = __driver_find(kind);
+       }
+       if (mlxsw_driver) {
+               if (!try_module_get(mlxsw_driver->owner))
+                       mlxsw_driver = NULL;
+       }
+
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       return mlxsw_driver;
+}
+
+static void mlxsw_core_driver_put(const char *kind)
+{
+       struct mlxsw_driver *mlxsw_driver;
+
+       spin_lock(&mlxsw_core_driver_list_lock);
+       mlxsw_driver = __driver_find(kind);
+       spin_unlock(&mlxsw_core_driver_list_lock);
+       if (!mlxsw_driver)
+               return;
+       module_put(mlxsw_driver->owner);
+}
+
+static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
+{
+       const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;
+
+       mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
+                                                mlxsw_core_dbg_root);
+       if (!mlxsw_core->dbg_dir)
+               return -ENOMEM;
+       debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
+                           mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
+       mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
+       mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
+       debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
+                           &mlxsw_core->dbg.vsd_blob);
+       mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
+       mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
+       debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
+                           &mlxsw_core->dbg.psid_blob);
+       return 0;
+}
+
+static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
+{
+       debugfs_remove_recursive(mlxsw_core->dbg_dir);
+}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+                                  const struct mlxsw_bus *mlxsw_bus,
+                                  void *bus_priv)
+{
+       const char *device_kind = mlxsw_bus_info->device_kind;
+       struct mlxsw_core *mlxsw_core;
+       struct mlxsw_driver *mlxsw_driver;
+       size_t alloc_size;
+       int err;
+
+       mlxsw_driver = mlxsw_core_driver_get(device_kind);
+       if (!mlxsw_driver)
+               return -EINVAL;
+       alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
+       mlxsw_core = kzalloc(alloc_size, GFP_KERNEL);
+       if (!mlxsw_core) {
+               err = -ENOMEM;
+               goto err_core_alloc;
+       }
+
+       INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
+       INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
+       mlxsw_core->driver = mlxsw_driver;
+       mlxsw_core->bus = mlxsw_bus;
+       mlxsw_core->bus_priv = bus_priv;
+       mlxsw_core->bus_info = mlxsw_bus_info;
+
+       mlxsw_core->pcpu_stats =
+               netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
+       if (!mlxsw_core->pcpu_stats) {
+               err = -ENOMEM;
+               goto err_alloc_stats;
+       }
+
+       err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+       if (err)
+               goto err_bus_init;
+
+       err = mlxsw_emad_init(mlxsw_core);
+       if (err)
+               goto err_emad_init;
+
+       err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
+                                mlxsw_bus_info);
+       if (err)
+               goto err_driver_init;
+
+       err = mlxsw_core_debugfs_init(mlxsw_core);
+       if (err)
+               goto err_debugfs_init;
+
+       return 0;
+
+err_debugfs_init:
+       mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+err_driver_init:
+       mlxsw_emad_fini(mlxsw_core);
+err_emad_init:
+       mlxsw_bus->fini(bus_priv);
+err_bus_init:
+       free_percpu(mlxsw_core->pcpu_stats);
+err_alloc_stats:
+       kfree(mlxsw_core);
+err_core_alloc:
+       mlxsw_core_driver_put(device_kind);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_register);
+
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
+{
+       const char *device_kind = mlxsw_core->bus_info->device_kind;
+
+       mlxsw_core_debugfs_fini(mlxsw_core);
+       mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+       mlxsw_emad_fini(mlxsw_core);
+       mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+       free_percpu(mlxsw_core->pcpu_stats);
+       kfree(mlxsw_core);
+       mlxsw_core_driver_put(device_kind);
+}
+EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
+
+static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
+{
+       return container_of(driver_priv, struct mlxsw_core, driver_priv);
+}
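+
+/* driver_priv is the zero-length array at the tail of struct
+ * mlxsw_core, so container_of() recovers the core instance from the
+ * private area handed out to the driver; this only works as long as
+ * driver_priv stays the last member of the structure.
+ */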
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
+
+       return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
+                                            tx_info);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_transmit);
+
+static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
+                                  const struct mlxsw_rx_listener *rxl_b)
+{
+       return (rxl_a->func == rxl_b->func &&
+               rxl_a->local_port == rxl_b->local_port &&
+               rxl_a->trap_id == rxl_b->trap_id);
+}
+
+static struct mlxsw_rx_listener_item *
+__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
+                       const struct mlxsw_rx_listener *rxl,
+                       void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
+               if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
+                   rxl_item->priv == priv)
+                       return rxl_item;
+       }
+       return NULL;
+}
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+                                   const struct mlxsw_rx_listener *rxl,
+                                   void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+       if (rxl_item)
+               return -EEXIST;
+       rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
+       if (!rxl_item)
+               return -ENOMEM;
+       rxl_item->rxl = *rxl;
+       rxl_item->priv = priv;
+
+       list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
+
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_rx_listener *rxl,
+                                      void *priv)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+
+       rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
+       if (!rxl_item)
+               return;
+       list_del_rcu(&rxl_item->list);
+       synchronize_rcu();
+       kfree(rxl_item);
+}
+EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
+
+static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
+                                          void *priv)
+{
+       struct mlxsw_event_listener_item *event_listener_item = priv;
+       struct mlxsw_reg_info reg;
+       char *payload;
+       char *op_tlv = mlxsw_emad_op_tlv(skb);
+       char *reg_tlv = mlxsw_emad_reg_tlv(skb);
+
+       reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
+       reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
+       payload = mlxsw_emad_reg_payload(op_tlv);
+       event_listener_item->el.func(&reg, payload, event_listener_item->priv);
+       dev_kfree_skb(skb);
+}
+
+static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
+                                     const struct mlxsw_event_listener *el_b)
+{
+       return (el_a->func == el_b->func &&
+               el_a->trap_id == el_b->trap_id);
+}
+
+static struct mlxsw_event_listener_item *
+__find_event_listener_item(struct mlxsw_core *mlxsw_core,
+                          const struct mlxsw_event_listener *el,
+                          void *priv)
+{
+       struct mlxsw_event_listener_item *el_item;
+
+       list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
+               if (__is_event_listener_equal(&el_item->el, el) &&
+                   el_item->priv == priv)
+                       return el_item;
+       }
+       return NULL;
+}
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_event_listener *el,
+                                      void *priv)
+{
+       int err;
+       struct mlxsw_event_listener_item *el_item;
+       const struct mlxsw_rx_listener rxl = {
+               .func = mlxsw_core_event_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = el->trap_id,
+       };
+
+       el_item = __find_event_listener_item(mlxsw_core, el, priv);
+       if (el_item)
+               return -EEXIST;
+       el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
+       if (!el_item)
+               return -ENOMEM;
+       el_item->el = *el;
+       el_item->priv = priv;
+
+       err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
+       if (err)
+               goto err_rx_listener_register;
+
+       /* Save the item only now that the RX listener was successfully
+        * registered for it.
+        */
+       list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
+
+       return 0;
+
+err_rx_listener_register:
+       kfree(el_item);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_register);
+
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                         const struct mlxsw_event_listener *el,
+                                         void *priv)
+{
+       struct mlxsw_event_listener_item *el_item;
+       const struct mlxsw_rx_listener rxl = {
+               .func = mlxsw_core_event_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = el->trap_id,
+       };
+
+       el_item = __find_event_listener_item(mlxsw_core, el, priv);
+       if (!el_item)
+               return;
+       mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
+       list_del(&el_item->list);
+       kfree(el_item);
+}
+EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+
+static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
+                                     const struct mlxsw_reg_info *reg,
+                                     char *payload,
+                                     enum mlxsw_core_reg_access_type type)
+{
+       int err;
+       char *op_tlv;
+       struct sk_buff *skb;
+       struct mlxsw_tx_info tx_info = {
+               .local_port = MLXSW_PORT_CPU_PORT,
+               .is_emad = true,
+       };
+
+       skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+       if (!skb)
+               return -ENOMEM;
+
+       mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
+       mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+
+       dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
+               mlxsw_core->emad.tid);
+       mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+
+       err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
+       if (!err) {
+               op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
+               memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
+                      reg->len);
+
+               dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
+                       mlxsw_core->emad.tid - 1);
+               mlxsw_core_buf_dump_dbg(mlxsw_core,
+                                       mlxsw_core->emad.resp_skb->data,
+                                       skb->len);
+
+               dev_kfree_skb(mlxsw_core->emad.resp_skb);
+       }
+
+       return err;
+}
+
+static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
+                                    const struct mlxsw_reg_info *reg,
+                                    char *payload,
+                                    enum mlxsw_core_reg_access_type type)
+{
+       int err, n_retry;
+       char *in_mbox, *out_mbox, *tmp;
+
+       in_mbox = mlxsw_cmd_mbox_alloc();
+       if (!in_mbox)
+               return -ENOMEM;
+
+       out_mbox = mlxsw_cmd_mbox_alloc();
+       if (!out_mbox) {
+               err = -ENOMEM;
+               goto free_in_mbox;
+       }
+
+       mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+       tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
+       mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
+
+       n_retry = 0;
+retry:
+       err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
+       if (!err) {
+               err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
+               if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+                       goto retry;
+       }
+
+       if (!err)
+               memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
+                      reg->len);
+
+       mlxsw_core->emad.tid++;
+       mlxsw_cmd_mbox_free(out_mbox);
+free_in_mbox:
+       mlxsw_cmd_mbox_free(in_mbox);
+       return err;
+}
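+
+/* Note that the command interface path above reuses the EMAD OP/REG
+ * TLV layout inside the mailboxes, which is why the EMAD pack helpers
+ * and mlxsw_emad_process_status() are shared with the EMAD path.
+ */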
+
+static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
+                                const struct mlxsw_reg_info *reg,
+                                char *payload,
+                                enum mlxsw_core_reg_access_type type)
+{
+       u64 cur_tid;
+       int err;
+
+       if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
+               dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
+                       reg->id, mlxsw_reg_id_str(reg->id),
+                       mlxsw_core_reg_access_type_str(type));
+               return -EINTR;
+       }
+
+       cur_tid = mlxsw_core->emad.tid;
+       dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+               cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+               mlxsw_core_reg_access_type_str(type));
+
+       /* During initialization EMAD interface is not available to us,
+        * so we default to command interface. We switch to EMAD interface
+        * after setting the appropriate traps.
+        */
+       if (!mlxsw_core->emad.use_emad)
+               err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
+                                               payload, type);
+       else
+               err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+                                                payload, type);
+
+       if (err)
+               dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
+                       cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
+                       mlxsw_core_reg_access_type_str(type));
+
+       mutex_unlock(&mlxsw_core->emad.lock);
+       return err;
+}
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload)
+{
+       return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+                                    MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
+}
+EXPORT_SYMBOL(mlxsw_reg_query);
+
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload)
+{
+       return mlxsw_core_reg_access(mlxsw_core, reg, payload,
+                                    MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
+}
+EXPORT_SYMBOL(mlxsw_reg_write);
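+
+/* Drivers access device registers through a packed payload and the
+ * MLXSW_REG() descriptor from reg.h; a minimal sketch reading the base
+ * MAC, assuming a SPAD-like register with the usual length define and
+ * memcpy_from helper in reg.h:
+ *
+ *      char spad_pl[MLXSW_REG_SPAD_LEN];
+ *      u8 base_mac[ETH_ALEN];
+ *      int err;
+ *
+ *      err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(spad), spad_pl);
+ *      if (err)
+ *              return err;
+ *      mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, base_mac);
+ */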
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+                           struct mlxsw_rx_info *rx_info)
+{
+       struct mlxsw_rx_listener_item *rxl_item;
+       const struct mlxsw_rx_listener *rxl;
+       struct mlxsw_core_pcpu_stats *pcpu_stats;
+       u8 local_port = rx_info->sys_port;
+       bool found = false;
+
+       dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: sys_port = %d, trap_id = 0x%x\n",
+                           __func__, rx_info->sys_port, rx_info->trap_id);
+
+       if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
+           (local_port >= MLXSW_PORT_MAX_PORTS))
+               goto drop;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
+               rxl = &rxl_item->rxl;
+               if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
+                    rxl->local_port == local_port) &&
+                   rxl->trap_id == rx_info->trap_id) {
+                       found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+       if (!found)
+               goto drop;
+
+       pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
+       u64_stats_update_begin(&pcpu_stats->syncp);
+       pcpu_stats->port_rx_packets[local_port]++;
+       pcpu_stats->port_rx_bytes[local_port] += skb->len;
+       pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
+       pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
+       u64_stats_update_end(&pcpu_stats->syncp);
+
+       rxl->func(skb, local_port, rxl_item->priv);
+       return;
+
+drop:
+       if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
+               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
+       else
+               this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
+       if (local_port >= MLXSW_PORT_MAX_PORTS)
+               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
+       else
+               this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
+       dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(mlxsw_core_skb_receive);
+
+int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
+                  u32 in_mod, bool out_mbox_direct,
+                  char *in_mbox, size_t in_mbox_size,
+                  char *out_mbox, size_t out_mbox_size)
+{
+       u8 status;
+       int err;
+
+       BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
+       if (!mlxsw_core->bus->cmd_exec)
+               return -EOPNOTSUPP;
+
+       dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+               opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
+       if (in_mbox) {
+               dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
+               mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
+       }
+
+       err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
+                                       opcode_mod, in_mod, out_mbox_direct,
+                                       in_mbox, in_mbox_size,
+                                       out_mbox, out_mbox_size, &status);
+
+       if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
+               dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
+                       opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+                       in_mod, status, mlxsw_cmd_status_str(status));
+       } else if (err == -ETIMEDOUT) {
+               dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
+                       opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
+                       in_mod);
+       }
+
+       if (!err && out_mbox) {
+               dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
+               mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
+       }
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_cmd_exec);
+
+static int __init mlxsw_core_module_init(void)
+{
+       mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+       if (!mlxsw_core_dbg_root)
+               return -ENOMEM;
+       return 0;
+}
+
+static void __exit mlxsw_core_module_exit(void)
+{
+       debugfs_remove_recursive(mlxsw_core_dbg_root);
+}
+
+module_init(mlxsw_core_module_init);
+module_exit(mlxsw_core_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch device core driver");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
new file mode 100644 (file)
index 0000000..2280b31
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_H
+#define _MLXSW_CORE_H
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#include "trap.h"
+#include "reg.h"
+
+#include "cmd.h"
+
+#define MLXSW_MODULE_ALIAS_PREFIX "mlxsw-driver-"
+#define MODULE_MLXSW_DRIVER_ALIAS(kind)        \
+       MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind)
+
+#define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2"
+
+struct mlxsw_core;
+struct mlxsw_driver;
+struct mlxsw_bus;
+struct mlxsw_bus_info;
+
+int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
+void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+                                  const struct mlxsw_bus *mlxsw_bus,
+                                  void *bus_priv);
+void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core);
+
+struct mlxsw_tx_info {
+       u8 local_port;
+       bool is_emad;
+};
+
+int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info);
+
+struct mlxsw_rx_listener {
+       void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
+       u8 local_port;
+       u16 trap_id;
+};
+
+struct mlxsw_event_listener {
+       void (*func)(const struct mlxsw_reg_info *reg,
+                    char *payload, void *priv);
+       enum mlxsw_event_trap_id trap_id;
+};
+
+int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
+                                   const struct mlxsw_rx_listener *rxl,
+                                   void *priv);
+void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_rx_listener *rxl,
+                                      void *priv);
+
+int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
+                                      const struct mlxsw_event_listener *el,
+                                      void *priv);
+void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
+                                         const struct mlxsw_event_listener *el,
+                                         void *priv);
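+
+/* A minimal sketch of consuming an event trap (the handler name and
+ * priv are illustrative, assuming trap.h defines MLXSW_TRAP_ID_PUDE
+ * for the port up/down event):
+ *
+ *      static void pude_event_func(const struct mlxsw_reg_info *reg,
+ *                                  char *payload, void *priv)
+ *      {
+ *              ... decode payload with the reg.h helpers for *reg ...
+ *      }
+ *
+ *      static const struct mlxsw_event_listener pude_listener = {
+ *              .func = pude_event_func,
+ *              .trap_id = MLXSW_TRAP_ID_PUDE,
+ *      };
+ *
+ *      err = mlxsw_core_event_listener_register(mlxsw_core,
+ *                                               &pude_listener, priv);
+ */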
+
+int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload);
+int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_reg_info *reg, char *payload);
+
+struct mlxsw_rx_info {
+       u16 sys_port;
+       int trap_id;
+};
+
+void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
+                           struct mlxsw_rx_info *rx_info);
+
+#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
+
+struct mlxsw_swid_config {
+       u8      used_type:1,
+               used_properties:1;
+       u8      type;
+       u8      properties;
+};
+
+struct mlxsw_config_profile {
+       u16     used_max_vepa_channels:1,
+               used_max_lag:1,
+               used_max_port_per_lag:1,
+               used_max_mid:1,
+               used_max_pgt:1,
+               used_max_system_port:1,
+               used_max_vlan_groups:1,
+               used_max_regions:1,
+               used_flood_tables:1,
+               used_flood_mode:1,
+               used_max_ib_mc:1,
+               used_max_pkey:1,
+               used_ar_sec:1,
+               used_adaptive_routing_group_cap:1;
+       u8      max_vepa_channels;
+       u16     max_lag;
+       u16     max_port_per_lag;
+       u16     max_mid;
+       u16     max_pgt;
+       u16     max_system_port;
+       u16     max_vlan_groups;
+       u16     max_regions;
+       u8      max_flood_tables;
+       u8      max_vid_flood_tables;
+       u8      flood_mode;
+       u16     max_ib_mc;
+       u16     max_pkey;
+       u8      ar_sec;
+       u16     adaptive_routing_group_cap;
+       u8      arn;
+       struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
+};
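+
+/* Each used_* bit is expected to make the bus driver program the
+ * matching field into the device profile, leaving unset fields at
+ * their firmware defaults; a minimal sketch with illustrative values:
+ *
+ *      static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+ *              .used_max_lag           = 1,
+ *              .max_lag                = 64,
+ *              .used_max_port_per_lag  = 1,
+ *              .max_port_per_lag       = 16,
+ *      };
+ */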
+
+struct mlxsw_driver {
+       struct list_head list;
+       const char *kind;
+       struct module *owner;
+       size_t priv_size;
+       int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_bus_info *mlxsw_bus_info);
+       void (*fini)(void *driver_priv);
+       void (*txhdr_construct)(struct sk_buff *skb,
+                               const struct mlxsw_tx_info *tx_info);
+       u8 txhdr_len;
+       const struct mlxsw_config_profile *profile;
+};
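+
+/* A device driver fills this structure and registers it at module
+ * init; a minimal sketch (struct mlxsw_sx, its callbacks and
+ * MLXSW_TXHDR_LEN are illustrative names from a hypothetical
+ * SwitchX-2 driver):
+ *
+ *      static struct mlxsw_driver mlxsw_sx_driver = {
+ *              .kind                   = MLXSW_DEVICE_KIND_SWITCHX2,
+ *              .owner                  = THIS_MODULE,
+ *              .priv_size              = sizeof(struct mlxsw_sx),
+ *              .init                   = mlxsw_sx_init,
+ *              .fini                   = mlxsw_sx_fini,
+ *              .txhdr_construct        = mlxsw_sx_txhdr_construct,
+ *              .txhdr_len              = MLXSW_TXHDR_LEN,
+ *              .profile                = &mlxsw_sx_config_profile,
+ *      };
+ *
+ *      static int __init mlxsw_sx_module_init(void)
+ *      {
+ *              return mlxsw_core_driver_register(&mlxsw_sx_driver);
+ *      }
+ *      MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
+ */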
+
+struct mlxsw_bus {
+       const char *kind;
+       int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
+                   const struct mlxsw_config_profile *profile);
+       void (*fini)(void *bus_priv);
+       int (*skb_transmit)(void *bus_priv, struct sk_buff *skb,
+                           const struct mlxsw_tx_info *tx_info);
+       int (*cmd_exec)(void *bus_priv, u16 opcode, u8 opcode_mod,
+                       u32 in_mod, bool out_mbox_direct,
+                       char *in_mbox, size_t in_mbox_size,
+                       char *out_mbox, size_t out_mbox_size,
+                       u8 *p_status);
+};
+
+struct mlxsw_bus_info {
+       const char *device_kind;
+       const char *device_name;
+       struct device *dev;
+       struct {
+               u16 major;
+               u16 minor;
+               u16 subminor;
+       } fw_rev;
+       u8 vsd[MLXSW_CMD_BOARDINFO_VSD_LEN];
+       u8 psid[MLXSW_CMD_BOARDINFO_PSID_LEN];
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
new file mode 100644 (file)
index 0000000..97b6bb5
--- /dev/null
@@ -0,0 +1,127 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/emad.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_EMAD_H
+#define _MLXSW_EMAD_H
+
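+/* EMADs (Ethernet Management Datagrams) are Ethernet frames, built from the
+ * constants below, that carry TLV-encoded register access requests to the
+ * device and its responses back to the driver.
+ */
+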
+#define MLXSW_EMAD_MAX_FRAME_LEN 1518  /* Length in u8 */
+#define MLXSW_EMAD_MAX_RETRY 5
+
+/* EMAD Ethernet header */
+#define MLXSW_EMAD_ETH_HDR_LEN 0x10    /* Length in u8 */
+#define MLXSW_EMAD_EH_DMAC "\x01\x02\xc9\x00\x00\x01"
+#define MLXSW_EMAD_EH_SMAC "\x00\x02\xc9\x01\x02\x03"
+#define MLXSW_EMAD_EH_ETHERTYPE 0x8932
+#define MLXSW_EMAD_EH_MLX_PROTO 0
+#define MLXSW_EMAD_EH_PROTO_VERSION 0
+
+/* EMAD TLV Types */
+enum {
+       MLXSW_EMAD_TLV_TYPE_END,
+       MLXSW_EMAD_TLV_TYPE_OP,
+       MLXSW_EMAD_TLV_TYPE_DR,
+       MLXSW_EMAD_TLV_TYPE_REG,
+       MLXSW_EMAD_TLV_TYPE_USERDATA,
+       MLXSW_EMAD_TLV_TYPE_OOBETH,
+};
+
+/* OP TLV */
+#define MLXSW_EMAD_OP_TLV_LEN 4                /* Length in u32 */
+
+enum {
+       MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS = 1,
+       MLXSW_EMAD_OP_TLV_CLASS_IPC = 2,
+};
+
+enum mlxsw_emad_op_tlv_status {
+       MLXSW_EMAD_OP_TLV_STATUS_SUCCESS,
+       MLXSW_EMAD_OP_TLV_STATUS_BUSY,
+       MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV,
+       MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED,
+       MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER,
+       MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE,
+       MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK,
+       MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR = 0x70,
+};
+
+static inline char *mlxsw_emad_op_tlv_status_str(u8 status)
+{
+       switch (status) {
+       case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
+               return "operation performed";
+       case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
+               return "device is busy";
+       case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
+               return "version not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
+               return "unknown TLV";
+       case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
+               return "register not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
+               return "class not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
+               return "method not supported";
+       case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
+               return "bad parameter";
+       case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
+               return "resource not available";
+       case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
+               return "acknowledged. retransmit";
+       case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
+               return "internal error";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+enum {
+       MLXSW_EMAD_OP_TLV_REQUEST,
+       MLXSW_EMAD_OP_TLV_RESPONSE
+};
+
+enum {
+       MLXSW_EMAD_OP_TLV_METHOD_QUERY = 1,
+       MLXSW_EMAD_OP_TLV_METHOD_WRITE = 2,
+       MLXSW_EMAD_OP_TLV_METHOD_SEND = 3,
+       MLXSW_EMAD_OP_TLV_METHOD_EVENT = 5,
+};
+
+/* END TLV */
+#define MLXSW_EMAD_END_TLV_LEN 1       /* Length in u32 */
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
new file mode 100644 (file)
index 0000000..4d0ac88
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/item.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_ITEM_H
+#define _MLXSW_ITEM_H
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+
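+/* Helpers for accessing fields ("items") packed into big-endian buffers such
+ * as command mailboxes and register payloads. The MLXSW_ITEM*() macros below
+ * generate typed get/set accessors for each declared field.
+ */
+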
+struct mlxsw_item {
+       unsigned short  offset;         /* bytes in container */
+       unsigned short  step;           /* step in bytes for indexed items */
+       unsigned short  in_step_offset; /* offset within one step */
+       unsigned char   shift;          /* shift in bits */
+       unsigned char   element_size;   /* size of element in bit array */
+       bool            no_real_shift;
+       union {
+               unsigned char   bits;
+               unsigned short  bytes;
+       } size;
+       const char      *name;
+};
+
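+/* Return the index, in units of typesize, of the word that holds the item,
+ * after checking that the item's placement is aligned to the access size.
+ */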
+static inline unsigned int
+__mlxsw_item_offset(struct mlxsw_item *item, unsigned short index,
+                   size_t typesize)
+{
+       BUG_ON(index && !item->step);
+       if (item->offset % typesize != 0 ||
+           item->step % typesize != 0 ||
+           item->in_step_offset % typesize != 0) {
+               pr_err("mlxsw: item bug (name=%s,offset=%x,step=%x,in_step_offset=%x,typesize=%zx)\n",
+                      item->name, item->offset, item->step,
+                      item->in_step_offset, typesize);
+               BUG();
+       }
+
+       return ((item->offset + item->step * index + item->in_step_offset) /
+               typesize);
+}
+
+static inline u16 __mlxsw_item_get16(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u16));
+       __be16 *b = (__be16 *) buf;
+       u16 tmp;
+
+       tmp = be16_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set16(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u16 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index,
+                                                 sizeof(u16));
+       __be16 *b = (__be16 *) buf;
+       u16 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+       u16 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be16_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be16(tmp);
+}
+
+static inline u32 __mlxsw_item_get32(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u32));
+       __be32 *b = (__be32 *) buf;
+       u32 tmp;
+
+       tmp = be32_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set32(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u32 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index,
+                                                 sizeof(u32));
+       __be32 *b = (__be32 *) buf;
+       u32 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+       u32 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be32_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be32(tmp);
+}
+
+static inline u64 __mlxsw_item_get64(char *buf, struct mlxsw_item *item,
+                                    unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+       __be64 *b = (__be64 *) buf;
+       u64 tmp;
+
+       tmp = be64_to_cpu(b[offset]);
+       tmp >>= item->shift;
+       tmp &= GENMASK_ULL(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item,
+                                     unsigned short index, u64 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u64));
+       __be64 *b = (__be64 *) buf;
+       u64 mask = GENMASK_ULL(item->size.bits - 1, 0) << item->shift;
+       u64 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = be64_to_cpu(b[offset]);
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = cpu_to_be64(tmp);
+}
+
+static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
+                                           struct mlxsw_item *item)
+{
+       memcpy(dst, &buf[item->offset], item->size.bytes);
+}
+
+static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
+                                         struct mlxsw_item *item)
+{
+       memcpy(&buf[item->offset], src, item->size.bytes);
+}
+
+static inline u16
+__mlxsw_item_bit_array_offset(struct mlxsw_item *item, u16 index, u8 *shift)
+{
+       u16 max_index, be_index;
+       u16 offset;             /* byte offset inside the array */
+
+       BUG_ON(index && !item->element_size);
+       if (item->offset % sizeof(u32) != 0 ||
+           BITS_PER_BYTE % item->element_size != 0) {
+               pr_err("mlxsw: item bug (name=%s,offset=%x,element_size=%x)\n",
+                      item->name, item->offset, item->element_size);
+               BUG();
+       }
+
+       max_index = (item->size.bytes << 3) / item->element_size - 1;
+       be_index = max_index - index;
+       offset = be_index * item->element_size >> 3;
+       *shift = index % (BITS_PER_BYTE / item->element_size) << 1;
+
+       return item->offset + offset;
+}
+
+static inline u8 __mlxsw_item_bit_array_get(char *buf, struct mlxsw_item *item,
+                                           u16 index)
+{
+       u8 shift, tmp;
+       u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+
+       tmp = buf[offset];
+       tmp >>= shift;
+       tmp &= GENMASK(item->element_size - 1, 0);
+       return tmp;
+}
+
+static inline void __mlxsw_item_bit_array_set(char *buf, struct mlxsw_item *item,
+                                             u16 index, u8 val)
+{
+       u8 shift, tmp;
+       u16 offset = __mlxsw_item_bit_array_offset(item, index, &shift);
+       u8 mask = GENMASK(item->element_size - 1, 0) << shift;
+
+       val <<= shift;
+       val &= mask;
+       tmp = buf[offset];
+       tmp &= ~mask;
+       tmp |= val;
+       buf[offset] = tmp;
+}
+
+#define __ITEM_NAME(_type, _cname, _iname)                                     \
+       mlxsw_##_type##_##_cname##_##_iname##_item
+
+/* _type: cmd_mbox, reg, etc.
+ * _cname: container name (e.g. command name, register name)
+ * _iname: item name within the container
+ */
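+/* For example, an illustrative declaration such as
+ * MLXSW_ITEM32(reg, example, field, 0x04, 16, 8) generates
+ * mlxsw_reg_example_field_get(buf) and mlxsw_reg_example_field_set(buf, val),
+ * accessing an 8-bit field at bit offset 16 of the 32-bit word at byte
+ * offset 0x04 of the container buffer ("example" is a made-up register name).
+ */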
+
+#define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+{                                                                              \
+       __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
+
+#define MLXSW_ITEM16_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,        \
+                            _step, _instepoffset, _norealshift)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u16                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u16 val)                              \
+{                                                                              \
+       __mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM32(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+{                                                                              \
+       __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
+
+#define MLXSW_ITEM32_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits,        \
+                            _step, _instepoffset, _norealshift)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u32                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u32 val)                              \
+{                                                                              \
+       __mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM64(_type, _cname, _iname, _offset, _shift, _sizebits)                \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(char *buf)         \
+{                                                                              \
+       return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+{                                                                              \
+       __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);   \
+}
+
+#define MLXSW_ITEM64_INDEXED(_type, _cname, _iname, _offset, _shift,           \
+                            _sizebits, _step, _instepoffset, _norealshift)     \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u64                                                              \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, unsigned short index)     \
+{                                                                              \
+       return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname),     \
+                                 index);                                       \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u64 val)                              \
+{                                                                              \
+       __mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname),            \
+                          index, val);                                         \
+}
+
+#define MLXSW_ITEM_BUF(_type, _cname, _iname, _offset, _sizebytes)             \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .size = {.bytes = _sizebytes,},                                         \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)                \
+{                                                                              \
+       __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)          \
+{                                                                              \
+       __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));  \
+}
+
+#define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,       \
+                            _element_size)                                     \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .element_size = _element_size,                                          \
+       .size = {.bytes = _sizebytes,},                                         \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u8                                                               \
+mlxsw_##_type##_##_cname##_##_iname##_get(char *buf, u16 index)                        \
+{                                                                              \
+       return __mlxsw_item_bit_array_get(buf,                                  \
+                                         &__ITEM_NAME(_type, _cname, _iname),  \
+                                         index);                               \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val)                \
+{                                                                              \
+       return __mlxsw_item_bit_array_set(buf,                                  \
+                                         &__ITEM_NAME(_type, _cname, _iname),  \
+                                         index, val);                          \
+}                                                                              \
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
new file mode 100644 (file)
index 0000000..298ead5
--- /dev/null
@@ -0,0 +1,1794 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/log2.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "pci.h"
+#include "core.h"
+#include "cmd.h"
+#include "port.h"
+
+static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
+
+static const struct pci_device_id mlxsw_pci_id_table[] = {
+       {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
+       {0, }
+};
+
+static struct dentry *mlxsw_pci_dbg_root;
+
+static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
+{
+       switch (id->device) {
+       case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
+               return MLXSW_DEVICE_KIND_SWITCHX2;
+       default:
+               BUG();
+       }
+}
+
+#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
+       iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+#define mlxsw_pci_read32(mlxsw_pci, reg) \
+       ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
+
+enum mlxsw_pci_queue_type {
+       MLXSW_PCI_QUEUE_TYPE_SDQ,
+       MLXSW_PCI_QUEUE_TYPE_RDQ,
+       MLXSW_PCI_QUEUE_TYPE_CQ,
+       MLXSW_PCI_QUEUE_TYPE_EQ,
+};
+
+static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
+{
+       switch (q_type) {
+       case MLXSW_PCI_QUEUE_TYPE_SDQ:
+               return "sdq";
+       case MLXSW_PCI_QUEUE_TYPE_RDQ:
+               return "rdq";
+       case MLXSW_PCI_QUEUE_TYPE_CQ:
+               return "cq";
+       case MLXSW_PCI_QUEUE_TYPE_EQ:
+               return "eq";
+       }
+       BUG();
+}
+
+#define MLXSW_PCI_QUEUE_TYPE_COUNT     4
+
+static const u16 mlxsw_pci_doorbell_type_offset[] = {
+       MLXSW_PCI_DOORBELL_SDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
+       MLXSW_PCI_DOORBELL_RDQ_OFFSET,  /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
+       MLXSW_PCI_DOORBELL_CQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+       MLXSW_PCI_DOORBELL_EQ_OFFSET,   /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
+       0, /* unused */
+       0, /* unused */
+       MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
+       MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
+};
+
+struct mlxsw_pci_mem_item {
+       char *buf;
+       dma_addr_t mapaddr;
+       size_t size;
+};
+
+struct mlxsw_pci_queue_elem_info {
+       char *elem; /* pointer to actual dma mapped element mem chunk */
+       union {
+               struct {
+                       struct sk_buff *skb;
+               } sdq;
+               struct {
+                       struct sk_buff *skb;
+               } rdq;
+       } u;
+};
+
+struct mlxsw_pci_queue {
+       spinlock_t lock; /* for queue accesses */
+       struct mlxsw_pci_mem_item mem_item;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       u16 producer_counter;
+       u16 consumer_counter;
+       u16 count; /* number of elements in queue */
+       u8 num; /* queue number */
+       u8 elem_size; /* size of one element */
+       enum mlxsw_pci_queue_type type;
+       struct tasklet_struct tasklet; /* queue processing tasklet */
+       struct mlxsw_pci *pci;
+       union {
+               struct {
+                       u32 comp_sdq_count;
+                       u32 comp_rdq_count;
+               } cq;
+               struct {
+                       u32 ev_cmd_count;
+                       u32 ev_comp_count;
+                       u32 ev_other_count;
+               } eq;
+       } u;
+};
+
+struct mlxsw_pci_queue_type_group {
+       struct mlxsw_pci_queue *q;
+       u8 count; /* number of queues in group */
+};
+
+struct mlxsw_pci {
+       struct pci_dev *pdev;
+       u8 __iomem *hw_addr;
+       struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
+       u32 doorbell_offset;
+       struct msix_entry msix_entry;
+       struct mlxsw_core *core;
+       struct {
+               u16 num_pages;
+               struct mlxsw_pci_mem_item *items;
+       } fw_area;
+       struct {
+               struct mutex lock; /* Lock access to command registers */
+               bool nopoll;
+               wait_queue_head_t wait;
+               bool wait_done;
+               struct {
+                       u8 status;
+                       u64 out_param;
+               } comp;
+       } cmd;
+       struct mlxsw_bus_info bus_info;
+       struct dentry *dbg_dir;
+};
+
+static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+{
+       tasklet_schedule(&q->tasklet);
+}
+
+static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
+                                       size_t elem_size, int elem_index)
+{
+       return q->mem_item.buf + (elem_size * elem_index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+       return &q->elem_info[elem_index];
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
+{
+       int index = q->producer_counter & (q->count - 1);
+
+       if ((q->producer_counter - q->consumer_counter) == q->count)
+               return NULL;
+       return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static struct mlxsw_pci_queue_elem_info *
+mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
+{
+       int index = q->consumer_counter & (q->count - 1);
+
+       return mlxsw_pci_queue_elem_info_get(q, index);
+}
+
+static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
+{
+       return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
+}
+
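+/* Ownership of an element alternates between hardware and software on each
+ * pass over the ring: the element is still hardware-owned while its owner
+ * bit differs from the parity of the current pass (consumer_counter & count).
+ */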
+static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
+{
+       return owner_bit != !!(q->consumer_counter & q->count);
+}
+
+static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
+                                        u32 (*get_elem_owner_func)(char *))
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *elem;
+       bool owner_bit;
+
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       elem = elem_info->elem;
+       owner_bit = get_elem_owner_func(elem);
+       if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+               return NULL;
+       q->consumer_counter++;
+       rmb(); /* make sure we read owned bit before the rest of elem */
+       return elem;
+}
+
+static struct mlxsw_pci_queue_type_group *
+mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
+                              enum mlxsw_pci_queue_type q_type)
+{
+       return &mlxsw_pci->queues[q_type];
+}
+
+static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
+                                 enum mlxsw_pci_queue_type q_type)
+{
+       struct mlxsw_pci_queue_type_group *queue_group;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
+       return queue_group->count;
+}
+
+static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
+}
+
+static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
+}
+
+static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
+}
+
+static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
+{
+       return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
+}
+
+static struct mlxsw_pci_queue *
+__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
+                     enum mlxsw_pci_queue_type q_type, u8 q_num)
+{
+       return &mlxsw_pci->queues[q_type].q[q_num];
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
+                                                u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci,
+                                    MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
+                                                u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci,
+                                    MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
+                                               u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
+}
+
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
+                                               u8 q_num)
+{
+       return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+}
+
+static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
+                                          struct mlxsw_pci_queue *q,
+                                          u16 val)
+{
+       mlxsw_pci_write32(mlxsw_pci,
+                         DOORBELL(mlxsw_pci->doorbell_offset,
+                                  mlxsw_pci_doorbell_type_offset[q->type],
+                                  q->num), val);
+}
+
+static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
+                                              struct mlxsw_pci_queue *q,
+                                              u16 val)
+{
+       mlxsw_pci_write32(mlxsw_pci,
+                         DOORBELL(mlxsw_pci->doorbell_offset,
+                                  mlxsw_pci_doorbell_arm_type_offset[q->type],
+                                  q->num), val);
+}
+
+static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
+                                                  struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
+}
+
+static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+                                                  struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
+                                      q->consumer_counter + q->count);
+}
+
+static void
+mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
+                                          struct mlxsw_pci_queue *q)
+{
+       wmb(); /* ensure all writes are done before we ring a bell */
+       __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
+}
+
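+/* Queue memory is a single DMA-coherent allocation whose pages are handed
+ * to the device one by one when the queue is created.
+ */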
+static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
+                                            int page_index)
+{
+       return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
+}
+
+static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                             struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->producer_counter = 0;
+       q->consumer_counter = 0;
+
+       /* Set CQ of the same number as this SDQ. */
+       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+       mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
+       mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+       }
+
+       err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
+                              struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+       seq_printf(file, hdr);
+       for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_sdq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %5d\n",
+                          i, q->producer_counter, q->consumer_counter,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
+                                 int index, char *frag_data, size_t frag_len,
+                                 int direction)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       dma_addr_t mapaddr;
+
+       mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
+       if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+               if (net_ratelimit())
+                       dev_err(&pdev->dev, "failed to dma map tx frag\n");
+               return -EIO;
+       }
+       mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+       mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+       return 0;
+}
+
+static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
+                                    int index, int direction)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
+       dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
+
+       if (!frag_len)
+               return;
+       pci_unmap_single(pdev, mapaddr, frag_len, direction);
+}
+
+static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
+                                  struct mlxsw_pci_queue_elem_info *elem_info)
+{
+       size_t buf_len = MLXSW_PORT_MAX_MTU;
+       char *wqe = elem_info->elem;
+       struct sk_buff *skb;
+       int err;
+
+       elem_info->u.rdq.skb = NULL;
+       skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+       if (!skb)
+               return -ENOMEM;
+
+       /* Assume that wqe was previously zeroed. */
+
+       err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+                                    buf_len, DMA_FROM_DEVICE);
+       if (err)
+               goto err_frag_map;
+
+       elem_info->u.rdq.skb = skb;
+       return 0;
+
+err_frag_map:
+       dev_kfree_skb_any(skb);
+       return err;
+}
+
+static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
+                                  struct mlxsw_pci_queue_elem_info *elem_info)
+{
+       struct sk_buff *skb;
+       char *wqe;
+
+       skb = elem_info->u.rdq.skb;
+       wqe = elem_info->elem;
+
+       mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+       dev_kfree_skb_any(skb);
+}
+
+static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                             struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       int i;
+       int err;
+
+       q->producer_counter = 0;
+       q->consumer_counter = 0;
+
+       /* Set CQ of the same number as this RDQ, with a base offset of
+        * MLXSW_PCI_SDQS_COUNT, as the lower CQ numbers are assigned to SDQs.
+        */
+       mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
+       mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
+       }
+
+       err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+       for (i = 0; i < q->count; i++) {
+               elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+               BUG_ON(!elem_info);
+               err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+               if (err)
+                       goto rollback;
+               /* Everything is set up, ring doorbell to pass elem to HW */
+               q->producer_counter++;
+               mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       }
+
+       return 0;
+
+rollback:
+       for (i--; i >= 0; i--) {
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+       }
+       mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+
+       return err;
+}
+
+static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
+                              struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       int i;
+
+       mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
+       for (i = 0; i < q->count; i++) {
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+       }
+}
+
+static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM PROD_COUNT CONS_COUNT COUNT\n";
+
+       seq_printf(file, hdr);
+       for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_rdq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %5d\n",
+                          i, q->producer_counter, q->consumer_counter,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                            struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->consumer_counter = 0;
+
+       for (i = 0; i < q->count; i++) {
+               char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+               mlxsw_pci_cqe_owner_set(elem, 1);
+       }
+
+       mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+       mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+       mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
+       }
+       err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+       mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
+                             struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM CONS_INDEX  SDQ_COUNT  RDQ_COUNT COUNT\n";
+
+       seq_printf(file, hdr);
+       for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_cq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %10d %5d\n",
+                          i, q->consumer_counter, q->u.cq.comp_sdq_count,
+                          q->u.cq.comp_rdq_count, q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
+                                    struct mlxsw_pci_queue *q,
+                                    u16 consumer_counter_limit,
+                                    char *cqe)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       struct sk_buff *skb;
+       int i;
+
+       spin_lock(&q->lock);
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       skb = elem_info->u.sdq.skb;
+       wqe = elem_info->elem;
+       for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+               mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+       dev_kfree_skb_any(skb);
+       elem_info->u.sdq.skb = NULL;
+
+       if (q->consumer_counter++ != consumer_counter_limit)
+               dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
+       spin_unlock(&q->lock);
+}
+
+static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
+                                    struct mlxsw_pci_queue *q,
+                                    u16 consumer_counter_limit,
+                                    char *cqe)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       struct sk_buff *skb;
+       struct mlxsw_rx_info rx_info;
+       int err;
+
+       elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+       skb = elem_info->u.rdq.skb;
+       if (!skb)
+               return;
+       wqe = elem_info->elem;
+       mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
+       if (q->consumer_counter++ != consumer_counter_limit)
+               dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+
+       /* We do not support LAG yet */
+       if (mlxsw_pci_cqe_lag_get(cqe))
+               goto drop;
+
+       rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
+       rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
+
+       skb_put(skb, mlxsw_pci_cqe_byte_count_get(cqe));
+       mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
+
+put_new_skb:
+       memset(wqe, 0, q->elem_size);
+       err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+       if (err && net_ratelimit())
+               dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
+       /* Everything is set up, ring doorbell to pass elem to HW */
+       q->producer_counter++;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+       return;
+
+drop:
+       dev_kfree_skb_any(skb);
+       goto put_new_skb;
+}
+
+static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
+{
+       return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+}
+
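+/* Completion queue tasklet: process up to half of the ring per run,
+ * dispatching send completions to SDQ cleanup and receive completions to
+ * RDQ handling, then ring the consumer doorbell and re-arm the CQ.
+ */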
+static void mlxsw_pci_cq_tasklet(unsigned long data)
+{
+       struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+       struct mlxsw_pci *mlxsw_pci = q->pci;
+       char *cqe;
+       int items = 0;
+       int credits = q->count >> 1;
+
+       while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+               u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+               u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
+               u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+
+               if (sendq) {
+                       struct mlxsw_pci_queue *sdq;
+
+                       sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
+                       mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+                                                wqe_counter, cqe);
+                       q->u.cq.comp_sdq_count++;
+               } else {
+                       struct mlxsw_pci_queue *rdq;
+
+                       rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
+                       mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+                                                wqe_counter, cqe);
+                       q->u.cq.comp_rdq_count++;
+               }
+               if (++items == credits)
+                       break;
+       }
+       if (items) {
+               mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+               mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                            struct mlxsw_pci_queue *q)
+{
+       int i;
+       int err;
+
+       q->consumer_counter = 0;
+
+       for (i = 0; i < q->count; i++) {
+               char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+               mlxsw_pci_eqe_owner_set(elem, 1);
+       }
+
+       mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+       mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
+       mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+       mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+       for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+               dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+               mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+       }
+       err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+       if (err)
+               return err;
+       mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+       mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+                             struct mlxsw_pci_queue *q)
+{
+       mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+}
+
+static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
+       struct mlxsw_pci_queue *q;
+       int i;
+       static const char hdr[] =
+               "NUM CONS_COUNT     EV_CMD    EV_COMP   EV_OTHER COUNT\n";
+
+       seq_printf(file, hdr);
+       for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
+               q = mlxsw_pci_eq_get(mlxsw_pci, i);
+               spin_lock_bh(&q->lock);
+               seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
+                          i, q->consumer_counter, q->u.eq.ev_cmd_count,
+                          q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
+                          q->count);
+               spin_unlock_bh(&q->lock);
+       }
+       return 0;
+}
+
+static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+{
+       mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
+       mlxsw_pci->cmd.comp.out_param =
+               ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
+               mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
+       mlxsw_pci->cmd.wait_done = true;
+       wake_up(&mlxsw_pci->cmd.wait);
+}
+
+static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
+{
+       return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+}
+
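+/* Event queue tasklet: consume up to half of the ring per run, completing
+ * pending commands on command events and collecting the completion queues
+ * signalled by completion events, whose tasklets are then scheduled.
+ */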
+static void mlxsw_pci_eq_tasklet(unsigned long data)
+{
+       struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+       struct mlxsw_pci *mlxsw_pci = q->pci;
+       unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
+       char *eqe;
+       u8 cqn;
+       bool cq_handle = false;
+       int items = 0;
+       int credits = q->count >> 1;
+
+       memset(&active_cqns, 0, sizeof(active_cqns));
+
+       while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+               u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
+
+               switch (event_type) {
+               case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+                       mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
+                       q->u.eq.ev_cmd_count++;
+                       break;
+               case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+                       cqn = mlxsw_pci_eqe_cqn_get(eqe);
+                       set_bit(cqn, active_cqns);
+                       cq_handle = true;
+                       q->u.eq.ev_comp_count++;
+                       break;
+               default:
+                       q->u.eq.ev_other_count++;
+               }
+               if (++items == credits)
+                       break;
+       }
+       if (items) {
+               mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+               mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+       }
+
+       if (!cq_handle)
+               return;
+       for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
+               q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
+               mlxsw_pci_queue_tasklet_schedule(q);
+       }
+}
+
+struct mlxsw_pci_queue_ops {
+       const char *name;
+       enum mlxsw_pci_queue_type type;
+       int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                   struct mlxsw_pci_queue *q);
+       void (*fini)(struct mlxsw_pci *mlxsw_pci,
+                    struct mlxsw_pci_queue *q);
+       void (*tasklet)(unsigned long data);
+       int (*dbg_read)(struct seq_file *s, void *data);
+       u16 elem_count;
+       u8 elem_size;
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_SDQ,
+       .init           = mlxsw_pci_sdq_init,
+       .fini           = mlxsw_pci_sdq_fini,
+       .dbg_read       = mlxsw_pci_sdq_dbg_read,
+       .elem_count     = MLXSW_PCI_WQE_COUNT,
+       .elem_size      = MLXSW_PCI_WQE_SIZE,
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_RDQ,
+       .init           = mlxsw_pci_rdq_init,
+       .fini           = mlxsw_pci_rdq_fini,
+       .dbg_read       = mlxsw_pci_rdq_dbg_read,
+       .elem_count     = MLXSW_PCI_WQE_COUNT,
+       .elem_size      = MLXSW_PCI_WQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_CQ,
+       .init           = mlxsw_pci_cq_init,
+       .fini           = mlxsw_pci_cq_fini,
+       .tasklet        = mlxsw_pci_cq_tasklet,
+       .dbg_read       = mlxsw_pci_cq_dbg_read,
+       .elem_count     = MLXSW_PCI_CQE_COUNT,
+       .elem_size      = MLXSW_PCI_CQE_SIZE
+};
+
+static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
+       .type           = MLXSW_PCI_QUEUE_TYPE_EQ,
+       .init           = mlxsw_pci_eq_init,
+       .fini           = mlxsw_pci_eq_fini,
+       .tasklet        = mlxsw_pci_eq_tasklet,
+       .dbg_read       = mlxsw_pci_eq_dbg_read,
+       .elem_count     = MLXSW_PCI_EQE_COUNT,
+       .elem_size      = MLXSW_PCI_EQE_SIZE
+};
+
+static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                               const struct mlxsw_pci_queue_ops *q_ops,
+                               struct mlxsw_pci_queue *q, u8 q_num)
+{
+       struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+       int i;
+       int err;
+
+       spin_lock_init(&q->lock);
+       q->num = q_num;
+       q->count = q_ops->elem_count;
+       q->elem_size = q_ops->elem_size;
+       q->type = q_ops->type;
+       q->pci = mlxsw_pci;
+
+       if (q_ops->tasklet)
+               tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+
+       mem_item->size = MLXSW_PCI_AQ_SIZE;
+       mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+                                            mem_item->size,
+                                            &mem_item->mapaddr);
+       if (!mem_item->buf)
+               return -ENOMEM;
+       memset(mem_item->buf, 0, mem_item->size);
+
+       q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
+       if (!q->elem_info) {
+               err = -ENOMEM;
+               goto err_elem_info_alloc;
+       }
+
+       /* Initialize the DMA-mapped elements' info (elem_info) for
+        * easy access later.
+        */
+       for (i = 0; i < q->count; i++) {
+               struct mlxsw_pci_queue_elem_info *elem_info;
+
+               elem_info = mlxsw_pci_queue_elem_info_get(q, i);
+               elem_info->elem =
+                       __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+       }
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = q_ops->init(mlxsw_pci, mbox, q);
+       if (err)
+               goto err_q_ops_init;
+       return 0;
+
+err_q_ops_init:
+       kfree(q->elem_info);
+err_elem_info_alloc:
+       pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                           mem_item->buf, mem_item->mapaddr);
+       return err;
+}
+
+static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
+                                const struct mlxsw_pci_queue_ops *q_ops,
+                                struct mlxsw_pci_queue *q)
+{
+       struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
+
+       q_ops->fini(mlxsw_pci, q);
+       kfree(q->elem_info);
+       pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                           mem_item->buf, mem_item->mapaddr);
+}
+
+static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                     const struct mlxsw_pci_queue_ops *q_ops,
+                                     u8 num_qs)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       struct mlxsw_pci_queue_type_group *queue_group;
+       char tmp[16];
+       int i;
+       int err;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+       queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
+       if (!queue_group->q)
+               return -ENOMEM;
+
+       for (i = 0; i < num_qs; i++) {
+               err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
+                                          &queue_group->q[i], i);
+               if (err)
+                       goto err_queue_init;
+       }
+       queue_group->count = num_qs;
+
+       sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
+       debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
+                                   q_ops->dbg_read);
+
+       return 0;
+
+err_queue_init:
+       for (i--; i >= 0; i--)
+               mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+       kfree(queue_group->q);
+       return err;
+}
+
+static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
+                                      const struct mlxsw_pci_queue_ops *q_ops)
+{
+       struct mlxsw_pci_queue_type_group *queue_group;
+       int i;
+
+       queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
+       for (i = 0; i < queue_group->count; i++)
+               mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
+       kfree(queue_group->q);
+}
+
+static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       u8 num_sdqs;
+       u8 sdq_log2sz;
+       u8 num_rdqs;
+       u8 rdq_log2sz;
+       u8 num_cqs;
+       u8 cq_log2sz;
+       u8 num_eqs;
+       u8 eq_log2sz;
+       int err;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
+       if (err)
+               return err;
+
+       num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
+       sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
+       num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
+       rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
+       num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
+       cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+       num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
+       eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
+
+       if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
+           (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
+           (num_cqs != MLXSW_PCI_CQS_COUNT) ||
+           (num_eqs != MLXSW_PCI_EQS_COUNT)) {
+               dev_err(&pdev->dev, "Unsupported number of queues\n");
+               return -EINVAL;
+       }
+
+       if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+           (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
+           (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+           (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
+               dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
+               return -EINVAL;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
+                                        num_eqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize event queues\n");
+               return err;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
+                                        num_cqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize completion queues\n");
+               goto err_cqs_init;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
+                                        num_sdqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
+               goto err_sdqs_init;
+       }
+
+       err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
+                                        num_rdqs);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
+               goto err_rdqs_init;
+       }
+
+       /* Until now we had to poll the command interface, since the
+        * event queues were not operational; switch to event mode.
+        */
+       mlxsw_pci->cmd.nopoll = true;
+       return 0;
+
+err_rdqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+err_sdqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+err_cqs_init:
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+       return err;
+}
+
+static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci->cmd.nopoll = false;
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
+       mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
+}
+
+static void
+mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
+                                    char *mbox, int index,
+                                    const struct mlxsw_swid_config *swid)
+{
+       u8 mask = 0;
+
+       if (swid->used_type) {
+               mlxsw_cmd_mbox_config_profile_swid_config_type_set(
+                       mbox, index, swid->type);
+               mask |= 1;
+       }
+       if (swid->used_properties) {
+               mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
+                       mbox, index, swid->properties);
+               mask |= 2;
+       }
+       mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
+}
+
+static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                   const struct mlxsw_config_profile *profile)
+{
+       int i;
+
+       mlxsw_cmd_mbox_zero(mbox);
+
+       if (profile->used_max_vepa_channels) {
+               mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
+                       mbox, profile->max_vepa_channels);
+       }
+       if (profile->used_max_lag) {
+               mlxsw_cmd_mbox_config_profile_set_max_lag_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_lag_set(
+                       mbox, profile->max_lag);
+       }
+       if (profile->used_max_port_per_lag) {
+               mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
+                       mbox, profile->max_port_per_lag);
+       }
+       if (profile->used_max_mid) {
+               mlxsw_cmd_mbox_config_profile_set_max_mid_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_mid_set(
+                       mbox, profile->max_mid);
+       }
+       if (profile->used_max_pgt) {
+               mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_pgt_set(
+                       mbox, profile->max_pgt);
+       }
+       if (profile->used_max_system_port) {
+               mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_system_port_set(
+                       mbox, profile->max_system_port);
+       }
+       if (profile->used_max_vlan_groups) {
+               mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
+                       mbox, profile->max_vlan_groups);
+       }
+       if (profile->used_max_regions) {
+               mlxsw_cmd_mbox_config_profile_set_max_regions_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_regions_set(
+                       mbox, profile->max_regions);
+       }
+       if (profile->used_flood_tables) {
+               mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
+                       mbox, profile->max_flood_tables);
+               mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
+                       mbox, profile->max_vid_flood_tables);
+       }
+       if (profile->used_flood_mode) {
+               mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_flood_mode_set(
+                       mbox, profile->flood_mode);
+       }
+       if (profile->used_max_ib_mc) {
+               mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
+                       mbox, profile->max_ib_mc);
+       }
+       if (profile->used_max_pkey) {
+               mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_max_pkey_set(
+                       mbox, profile->max_pkey);
+       }
+       if (profile->used_ar_sec) {
+               mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_ar_sec_set(
+                       mbox, profile->ar_sec);
+       }
+       if (profile->used_adaptive_routing_group_cap) {
+               mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
+                       mbox, 1);
+               mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
+                       mbox, profile->adaptive_routing_group_cap);
+       }
+
+       for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
+               mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
+                                                    &profile->swid_config[i]);
+
+       return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
+}
+
+static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
+{
+       struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
+       int err;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
+       if (err)
+               return err;
+       mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
+       mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
+       return 0;
+}
+
+static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+                                 u16 num_pages)
+{
+       struct mlxsw_pci_mem_item *mem_item;
+       int i;
+       int err;
+
+       mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
+                                          GFP_KERNEL);
+       if (!mlxsw_pci->fw_area.items)
+               return -ENOMEM;
+       mlxsw_pci->fw_area.num_pages = num_pages;
+
+       mlxsw_cmd_mbox_zero(mbox);
+       for (i = 0; i < num_pages; i++) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               mem_item->size = MLXSW_PCI_PAGE_SIZE;
+               mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
+                                                    mem_item->size,
+                                                    &mem_item->mapaddr);
+               if (!mem_item->buf) {
+                       err = -ENOMEM;
+                       goto err_alloc;
+               }
+               mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
+               mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
+       }
+
+       err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
+       if (err)
+               goto err_cmd_map_fa;
+
+       return 0;
+
+err_cmd_map_fa:
+err_alloc:
+       for (i--; i >= 0; i--) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                                   mem_item->buf, mem_item->mapaddr);
+       }
+       kfree(mlxsw_pci->fw_area.items);
+       return err;
+}
+
+static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       struct mlxsw_pci_mem_item *mem_item;
+       int i;
+
+       mlxsw_cmd_unmap_fa(mlxsw_pci->core);
+
+       for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
+               mem_item = &mlxsw_pci->fw_area.items[i];
+
+               pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
+                                   mem_item->buf, mem_item->mapaddr);
+       }
+       kfree(mlxsw_pci->fw_area.items);
+}
+
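+/* A single MSI-X vector serves the device; the handler only kicks the
+ * per-EQ tasklets, which do the actual event processing and
+ * demultiplexing.
+ */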
+static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
+{
+       struct mlxsw_pci *mlxsw_pci = dev_id;
+       struct mlxsw_pci_queue *q;
+       int i;
+
+       for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
+               q = mlxsw_pci_eq_get(mlxsw_pci, i);
+               mlxsw_pci_queue_tasklet_schedule(q);
+       }
+       return IRQ_HANDLED;
+}
+
+static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
+                         const struct mlxsw_config_profile *profile)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       struct pci_dev *pdev = mlxsw_pci->pdev;
+       char *mbox;
+       u16 num_pages;
+       int err;
+
+       mutex_init(&mlxsw_pci->cmd.lock);
+       init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+       mlxsw_pci->core = mlxsw_core;
+
+       mbox = mlxsw_cmd_mbox_alloc();
+       if (!mbox)
+               return -ENOMEM;
+       err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
+       if (err)
+               goto err_query_fw;
+
+       mlxsw_pci->bus_info.fw_rev.major =
+               mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
+       mlxsw_pci->bus_info.fw_rev.minor =
+               mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
+       mlxsw_pci->bus_info.fw_rev.subminor =
+               mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
+
+       if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
+               dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
+               err = -EINVAL;
+               goto err_iface_rev;
+       }
+       if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
+               dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
+               err = -EINVAL;
+               goto err_doorbell_page_bar;
+       }
+
+       mlxsw_pci->doorbell_offset =
+               mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
+
+       num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
+       err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
+       if (err)
+               goto err_fw_area_init;
+
+       err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
+       if (err)
+               goto err_boardinfo;
+
+       err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
+       if (err)
+               goto err_config_profile;
+
+       err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
+       if (err)
+               goto err_aqs_init;
+
+       err = request_irq(mlxsw_pci->msix_entry.vector,
+                         mlxsw_pci_eq_irq_handler, 0,
+                         mlxsw_pci_driver_name, mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "IRQ request failed\n");
+               goto err_request_eq_irq;
+       }
+
+       goto mbox_put;
+
+err_request_eq_irq:
+       mlxsw_pci_aqs_fini(mlxsw_pci);
+err_aqs_init:
+err_config_profile:
+err_boardinfo:
+       mlxsw_pci_fw_area_fini(mlxsw_pci);
+err_fw_area_init:
+err_doorbell_page_bar:
+err_iface_rev:
+err_query_fw:
+mbox_put:
+       mlxsw_cmd_mbox_free(mbox);
+       return err;
+}
+
+static void mlxsw_pci_fini(void *bus_priv)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+       free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
+       mlxsw_pci_aqs_fini(mlxsw_pci);
+       mlxsw_pci_fw_area_fini(mlxsw_pci);
+}
+
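+/* Transmit traffic is spread across the send descriptor queues by a
+ * simple modulo on the local port number.
+ */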
+static struct mlxsw_pci_queue *
+mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
+                  const struct mlxsw_tx_info *tx_info)
+{
+       u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
+
+       return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
+}
+
+static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
+                                 const struct mlxsw_tx_info *tx_info)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       struct mlxsw_pci_queue *q;
+       struct mlxsw_pci_queue_elem_info *elem_info;
+       char *wqe;
+       int i;
+       int err;
+
+       if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
+               err = skb_linearize(skb);
+               if (err)
+                       return err;
+       }
+
+       q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
+       spin_lock_bh(&q->lock);
+       elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
+       if (!elem_info) {
+               /* queue is full */
+               err = -EAGAIN;
+               goto unlock;
+       }
+       elem_info->u.sdq.skb = skb;
+
+       wqe = elem_info->elem;
+       mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
+       mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
+       mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
+
+       err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
+                                    skb_headlen(skb), DMA_TO_DEVICE);
+       if (err)
+               goto unlock;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
+                                            skb_frag_address(frag),
+                                            skb_frag_size(frag),
+                                            DMA_TO_DEVICE);
+               if (err)
+                       goto unmap_frags;
+       }
+
+       /* Set byte count of unused scatter/gather entries to zero. */
+       for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
+               mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
+
+       /* Everything is set up, ring producer doorbell to get HW going */
+       q->producer_counter++;
+       mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
+
+       goto unlock;
+
+unmap_frags:
+       for (; i >= 0; i--)
+               mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
+unlock:
+       spin_unlock_bh(&q->lock);
+       return err;
+}
+
+static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
+                             u32 in_mod, bool out_mbox_direct,
+                             char *in_mbox, size_t in_mbox_size,
+                             char *out_mbox, size_t out_mbox_size,
+                             u8 *p_status)
+{
+       struct mlxsw_pci *mlxsw_pci = bus_priv;
+       dma_addr_t in_mapaddr = 0;
+       dma_addr_t out_mapaddr = 0;
+       bool evreq = mlxsw_pci->cmd.nopoll;
+       unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
+       bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+       int err;
+
+       *p_status = MLXSW_CMD_STATUS_OK;
+
+       err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
+       if (err)
+               return err;
+
+       if (in_mbox) {
+               in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
+                                           in_mbox_size, PCI_DMA_TODEVICE);
+               if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+                                                  in_mapaddr))) {
+                       err = -EIO;
+                       goto err_in_mbox_map;
+               }
+       }
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+
+       if (out_mbox) {
+               out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
+                                            out_mbox_size, PCI_DMA_FROMDEVICE);
+               if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
+                                                  out_mapaddr))) {
+                       err = -EIO;
+                       goto err_out_mbox_map;
+               }
+       }
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
+       mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
+
+       *p_wait_done = false;
+
+       wmb(); /* all needs to be written before we write control register */
+       mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
+                         MLXSW_PCI_CIR_CTRL_GO_BIT |
+                         (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
+                         (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
+                         opcode);
+
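+       /* In polling mode (before the EQs are brought up) spin on the GO
+        * bit in CIR_CTRL; in event mode sleep until the EQ tasklet
+        * reports command completion.
+        */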
+       if (!evreq) {
+               unsigned long end;
+
+               end = jiffies + timeout;
+               do {
+                       u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+
+                       if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+                               *p_wait_done = true;
+                               *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+                               break;
+                       }
+                       cond_resched();
+               } while (time_before(jiffies, end));
+       } else {
+               wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
+               *p_status = mlxsw_pci->cmd.comp.status;
+       }
+
+       err = 0;
+       if (*p_wait_done) {
+               if (*p_status)
+                       err = -EIO;
+       } else {
+               err = -ETIMEDOUT;
+       }
+
+       if (!err && out_mbox && out_mbox_direct) {
+               /* Some commands do not use the output parameter as a
+                * mailbox address, but instead store their output directly
+                * in the registers. In that case, copy the registers into
+                * the mbox buffer.
+                */
+               __be32 tmp;
+
+               if (!evreq) {
+                       tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+                                                          CIR_OUT_PARAM_HI));
+                       memcpy(out_mbox, &tmp, sizeof(tmp));
+                       tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+                                                          CIR_OUT_PARAM_LO));
+                       memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
+               }
+       }
+
+       if (out_mapaddr)
+               pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
+                                PCI_DMA_FROMDEVICE);
+
+       /* fall through */
+
+err_out_mbox_map:
+       if (in_mapaddr)
+               pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
+                                PCI_DMA_TODEVICE);
+err_in_mbox_map:
+       mutex_unlock(&mlxsw_pci->cmd.lock);
+
+       return err;
+}
+
+static const struct mlxsw_bus mlxsw_pci_bus = {
+       .kind           = "pci",
+       .init           = mlxsw_pci_init,
+       .fini           = mlxsw_pci_fini,
+       .skb_transmit   = mlxsw_pci_skb_transmit,
+       .cmd_exec       = mlxsw_pci_cmd_exec,
+};
+
+static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
+       /* Current firmware does not let us know when the reset is done.
+        * So we just wait for a constant time and hope for the best.
+        */
+        */
+       msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
+       return 0;
+}
+
+static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mlxsw_pci *mlxsw_pci;
+       int err;
+
+       mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
+       if (!mlxsw_pci)
+               return -ENOMEM;
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "pci_enable_device failed\n");
+               goto err_pci_enable_device;
+       }
+
+       err = pci_request_regions(pdev, mlxsw_pci_driver_name);
+       if (err) {
+               dev_err(&pdev->dev, "pci_request_regions failed\n");
+               goto err_pci_request_regions;
+       }
+
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+       if (!err) {
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+                       goto err_pci_set_dma_mask;
+               }
+       }
+
+       if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
+               dev_err(&pdev->dev, "invalid PCI region size\n");
+               err = -EINVAL;
+               goto err_pci_resource_len_check;
+       }
+
+       mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
+                                    pci_resource_len(pdev, 0));
+       if (!mlxsw_pci->hw_addr) {
+               dev_err(&pdev->dev, "ioremap failed\n");
+               err = -EIO;
+               goto err_ioremap;
+       }
+       pci_set_master(pdev);
+
+       mlxsw_pci->pdev = pdev;
+       pci_set_drvdata(pdev, mlxsw_pci);
+
+       err = mlxsw_pci_sw_reset(mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "Software reset failed\n");
+               goto err_sw_reset;
+       }
+
+       err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
+       if (err) {
+               dev_err(&pdev->dev, "MSI-X init failed\n");
+               goto err_msix_init;
+       }
+
+       mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
+       mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
+       mlxsw_pci->bus_info.dev = &pdev->dev;
+
+       mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
+                                               mlxsw_pci_dbg_root);
+       if (!mlxsw_pci->dbg_dir) {
+               dev_err(&pdev->dev, "Failed to create debugfs dir\n");
+               goto err_dbg_create_dir;
+       }
+
+       err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
+                                            &mlxsw_pci_bus, mlxsw_pci);
+       if (err) {
+               dev_err(&pdev->dev, "cannot register bus device\n");
+               goto err_bus_device_register;
+       }
+
+       return 0;
+
+err_bus_device_register:
+       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+err_dbg_create_dir:
+       pci_disable_msix(mlxsw_pci->pdev);
+err_msix_init:
+err_sw_reset:
+       iounmap(mlxsw_pci->hw_addr);
+err_ioremap:
+err_pci_resource_len_check:
+err_pci_set_dma_mask:
+       pci_release_regions(pdev);
+err_pci_request_regions:
+       pci_disable_device(pdev);
+err_pci_enable_device:
+       kfree(mlxsw_pci);
+       return err;
+}
+
+static void mlxsw_pci_remove(struct pci_dev *pdev)
+{
+       struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
+
+       mlxsw_core_bus_device_unregister(mlxsw_pci->core);
+       debugfs_remove_recursive(mlxsw_pci->dbg_dir);
+       pci_disable_msix(mlxsw_pci->pdev);
+       iounmap(mlxsw_pci->hw_addr);
+       pci_release_regions(mlxsw_pci->pdev);
+       pci_disable_device(mlxsw_pci->pdev);
+       kfree(mlxsw_pci);
+}
+
+static struct pci_driver mlxsw_pci_driver = {
+       .name           = mlxsw_pci_driver_name,
+       .id_table       = mlxsw_pci_id_table,
+       .probe          = mlxsw_pci_probe,
+       .remove         = mlxsw_pci_remove,
+};
+
+static int __init mlxsw_pci_module_init(void)
+{
+       int err;
+
+       mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
+       if (!mlxsw_pci_dbg_root)
+               return -ENOMEM;
+       err = pci_register_driver(&mlxsw_pci_driver);
+       if (err)
+               goto err_register_driver;
+       return 0;
+
+err_register_driver:
+       debugfs_remove_recursive(mlxsw_pci_dbg_root);
+       return err;
+}
+
+static void __exit mlxsw_pci_module_exit(void)
+{
+       pci_unregister_driver(&mlxsw_pci_driver);
+       debugfs_remove_recursive(mlxsw_pci_dbg_root);
+}
+
+module_init(mlxsw_pci_module_init);
+module_exit(mlxsw_pci_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
+MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
new file mode 100644 (file)
index 0000000..887af84
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/pci.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_PCI_H
+#define _MLXSW_PCI_H
+
+#include <linux/bitops.h>
+
+#include "item.h"
+
+#define PCI_DEVICE_ID_MELLANOX_SWITCHX2        0xc738
+#define MLXSW_PCI_BAR0_SIZE            (1024 * 1024) /* 1MB */
+#define MLXSW_PCI_PAGE_SIZE            4096
+
+#define MLXSW_PCI_CIR_BASE                     0x71000
+#define MLXSW_PCI_CIR_IN_PARAM_HI              MLXSW_PCI_CIR_BASE
+#define MLXSW_PCI_CIR_IN_PARAM_LO              (MLXSW_PCI_CIR_BASE + 0x04)
+#define MLXSW_PCI_CIR_IN_MODIFIER              (MLXSW_PCI_CIR_BASE + 0x08)
+#define MLXSW_PCI_CIR_OUT_PARAM_HI             (MLXSW_PCI_CIR_BASE + 0x0C)
+#define MLXSW_PCI_CIR_OUT_PARAM_LO             (MLXSW_PCI_CIR_BASE + 0x10)
+#define MLXSW_PCI_CIR_TOKEN                    (MLXSW_PCI_CIR_BASE + 0x14)
+#define MLXSW_PCI_CIR_CTRL                     (MLXSW_PCI_CIR_BASE + 0x18)
+#define MLXSW_PCI_CIR_CTRL_GO_BIT              BIT(23)
+#define MLXSW_PCI_CIR_CTRL_EVREQ_BIT           BIT(22)
+#define MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT    12
+#define MLXSW_PCI_CIR_CTRL_STATUS_SHIFT                24
+#define MLXSW_PCI_CIR_TIMEOUT_MSECS            1000
+
+#define MLXSW_PCI_SW_RESET                     0xF0010
+#define MLXSW_PCI_SW_RESET_RST_BIT             BIT(0)
+#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS       5000
+
+#define MLXSW_PCI_DOORBELL_SDQ_OFFSET          0x000
+#define MLXSW_PCI_DOORBELL_RDQ_OFFSET          0x200
+#define MLXSW_PCI_DOORBELL_CQ_OFFSET           0x400
+#define MLXSW_PCI_DOORBELL_EQ_OFFSET           0x600
+#define MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET       0x800
+#define MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET       0xA00
+
+#define MLXSW_PCI_DOORBELL(offset, type_offset, num)   \
+       ((offset) + (type_offset) + (num) * 4)
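+/* E.g. the doorbell of CQ number 5 lives at
+ * doorbell_page_offset + MLXSW_PCI_DOORBELL_CQ_OFFSET + 5 * 4,
+ * where doorbell_page_offset is reported by the QUERY_FW command.
+ */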
+
+#define MLXSW_PCI_RDQS_COUNT   24
+#define MLXSW_PCI_SDQS_COUNT   24
+#define MLXSW_PCI_CQS_COUNT    (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
+#define MLXSW_PCI_EQS_COUNT    2
+#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQ_COMP_NUM  1
+
+#define MLXSW_PCI_AQ_PAGES     8
+#define MLXSW_PCI_AQ_SIZE      (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
+#define MLXSW_PCI_WQE_SIZE     32 /* 32 bytes per element */
+#define MLXSW_PCI_CQE_SIZE     16 /* 16 bytes per element */
+#define MLXSW_PCI_EQE_SIZE     16 /* 16 bytes per element */
+#define MLXSW_PCI_WQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
+#define MLXSW_PCI_CQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_EQE_COUNT    (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
+#define MLXSW_PCI_EQE_UPDATE_COUNT     0x80
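+/* With MLXSW_PCI_AQ_PAGES (8) pages of MLXSW_PCI_PAGE_SIZE (4096) bytes,
+ * each queue spans 32 KB, yielding 1024 WQEs of 32 bytes and 2048
+ * CQEs/EQEs of 16 bytes.
+ */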
+
+#define MLXSW_PCI_WQE_SG_ENTRIES       3
+#define MLXSW_PCI_WQE_TYPE_ETHERNET    0xA
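+/* A WQE carries at most 3 scatter/gather entries, one of which is used
+ * by the linear part of the skb; the transmit path therefore linearizes
+ * any skb with more than 2 fragments.
+ */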
+
+/* pci_wqe_c
+ * If set it indicates that a completion should be reported upon
+ * execution of this descriptor.
+ */
+MLXSW_ITEM32(pci, wqe, c, 0x00, 31, 1);
+
+/* pci_wqe_lp
+ * Local Processing, set if packet should be processed by the local
+ * switch hardware:
+ * For Ethernet EMAD (Direct Route and non Direct Route) -
+ * must be set if packet destination is local device
+ * For InfiniBand CTL - must be set if packet destination is local device
+ * Otherwise it must be clear
+ * Local Process packets must not exceed the size of 2K (including payload
+ * and headers).
+ */
+MLXSW_ITEM32(pci, wqe, lp, 0x00, 30, 1);
+
+/* pci_wqe_type
+ * Packet type.
+ */
+MLXSW_ITEM32(pci, wqe, type, 0x00, 23, 4);
+
+/* pci_wqe_byte_count
+ * Size of i-th scatter/gather entry, 0 if entry is unused.
+ */
+MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
+
+/* pci_wqe_address
+ * Physical address of i-th scatter/gather entry.
+ * Gather Entries must be 2Byte aligned.
+ */
+MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
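+/* Indexed items take the entry index as an extra argument; entry i of
+ * the byte count array starts at 0x02 + i * 0x02, and entry i of the
+ * address array at 0x08 + i * 0x08.
+ */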
+
+/* pci_cqe_lag
+ * Packet arrived on a port which is a member of a LAG.
+ */
+MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+
+/* pci_cqe_system_port
+ * When lag=0: System port on which the packet was received
+ * When lag=1:
+ * bits [15:4] LAG ID on which the packet was received
+ * bits [3:0] sub_port on which the packet was received
+ */
+MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
+
+/* pci_cqe_wqe_counter
+ * WQE count of the WQEs completed on the associated dqn
+ */
+MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
+
+/* pci_cqe_byte_count
+ * Byte count of received packets, including the additional two
+ * reserved bytes that are appended to the end of the frame.
+ * Reserved for Send CQE.
+ */
+MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
+
+/* pci_cqe_trap_id
+ * Trap ID that captured the packet.
+ */
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 8);
+
+/* pci_cqe_e
+ * CQE with Error.
+ */
+MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+
+/* pci_cqe_sr
+ * 1 - Send Queue
+ * 0 - Receive Queue
+ */
+MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+
+/* pci_cqe_dqn
+ * Descriptor Queue (DQ) Number.
+ */
+MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+
+/* pci_cqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_event_type
+ * Event type.
+ */
+MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
+#define MLXSW_PCI_EQE_EVENT_TYPE_COMP  0x00
+#define MLXSW_PCI_EQE_EVENT_TYPE_CMD   0x0A
+
+/* pci_eqe_event_sub_type
+ * Event sub-type.
+ */
+MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
+
+/* pci_eqe_cqn
+ * Completion Queue that triggered this EQE.
+ */
+MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
+
+/* pci_eqe_owner
+ * Ownership bit.
+ */
+MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
+
+/* pci_eqe_cmd_token
+ * Command completion event - token
+ */
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+
+/* pci_eqe_cmd_status
+ * Command completion event - status
+ */
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+
+/* pci_eqe_cmd_out_param_h
+ * Command completion event - output parameter - higher part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+
+/* pci_eqe_cmd_out_param_l
+ * Command completion event - output parameter - lower part
+ */
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
new file mode 100644 (file)
index 0000000..726f543
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/port.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_PORT_H
+#define _MLXSW_PORT_H
+
+#include <linux/types.h>
+
+#define MLXSW_PORT_MAX_MTU             10000
+
+#define MLXSW_PORT_DEFAULT_VID         1
+
+#define MLXSW_PORT_SWID_DISABLED_PORT  255
+#define MLXSW_PORT_SWID_ALL_SWIDS      254
+#define MLXSW_PORT_SWID_TYPE_ETH       2
+
+#define MLXSW_PORT_MID                 0xd000
+
+#define MLXSW_PORT_MAX_PHY_PORTS       0x40
+#define MLXSW_PORT_MAX_PORTS           MLXSW_PORT_MAX_PHY_PORTS
+
+#define MLXSW_PORT_DEVID_BITS_OFFSET   10
+#define MLXSW_PORT_PHY_BITS_OFFSET     4
+#define MLXSW_PORT_PHY_BITS_MASK       (MLXSW_PORT_MAX_PHY_PORTS - 1)
+
+#define MLXSW_PORT_CPU_PORT            0x0
+
+#define MLXSW_PORT_DONT_CARE           (MLXSW_PORT_MAX_PORTS)
+
+enum mlxsw_port_admin_status {
+       MLXSW_PORT_ADMIN_STATUS_UP = 1,
+       MLXSW_PORT_ADMIN_STATUS_DOWN = 2,
+       MLXSW_PORT_ADMIN_STATUS_UP_ONCE = 3,
+       MLXSW_PORT_ADMIN_STATUS_DISABLED = 4,
+};
+
+enum mlxsw_reg_pude_oper_status {
+       MLXSW_PORT_OPER_STATUS_UP = 1,
+       MLXSW_PORT_OPER_STATUS_DOWN = 2,
+       MLXSW_PORT_OPER_STATUS_FAILURE = 4,     /* Can be set to up again. */
+};
+
+#endif /* _MLXSW_PORT_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
new file mode 100644 (file)
index 0000000..b5a72f8
--- /dev/null
@@ -0,0 +1,1289 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/reg.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_REG_H
+#define _MLXSW_REG_H
+
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "item.h"
+#include "port.h"
+
+struct mlxsw_reg_info {
+       u16 id;
+       u16 len; /* In u8 */
+};
+
+#define MLXSW_REG(type) (&mlxsw_reg_##type)
+#define MLXSW_REG_LEN(type) MLXSW_REG(type)->len
+#define MLXSW_REG_ZERO(type, payload) memset(payload, 0, MLXSW_REG(type)->len)
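+/* A register payload is a plain char buffer; callers declare it with the
+ * register's _LEN define and clear it via MLXSW_REG_ZERO() before packing
+ * fields, as the pack helpers below do.
+ */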
+
+/* SGCR - Switch General Configuration Register
+ * --------------------------------------------
+ * This register is used for configuration of the switch capabilities.
+ */
+#define MLXSW_REG_SGCR_ID 0x2000
+#define MLXSW_REG_SGCR_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sgcr = {
+       .id = MLXSW_REG_SGCR_ID,
+       .len = MLXSW_REG_SGCR_LEN,
+};
+
+/* reg_sgcr_llb
+ * Link Local Broadcast (Default=0)
+ * When set, all Link Local packets (224.0.0.X) are treated as broadcast
+ * packets and the IGMP snooping entries are ignored.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+
+static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+{
+       MLXSW_REG_ZERO(sgcr, payload);
+       mlxsw_reg_sgcr_llb_set(payload, !!llb);
+}
+
+/* SPAD - Switch Physical Address Register
+ * ---------------------------------------
+ * The SPAD register configures the switch physical MAC address.
+ */
+#define MLXSW_REG_SPAD_ID 0x2002
+#define MLXSW_REG_SPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_spad = {
+       .id = MLXSW_REG_SPAD_ID,
+       .len = MLXSW_REG_SPAD_LEN,
+};
+
+/* reg_spad_base_mac
+ * Base MAC address for the switch partitions.
+ * Per switch partition MAC address is equal to:
+ * base_mac + swid
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
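+/* For example, with a (hypothetical) base_mac of 00:02:c9:00:00:10,
+ * switch partition 1 would use 00:02:c9:00:00:11.
+ */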
+
+/* SMID - Switch Multicast ID
+ * --------------------------
+ * In a multi-chip configuration, each device should maintain a mapping from
+ * Multicast ID (MID) to a list of local ports. This mapping is used in all
+ * the devices other than the ingress device, and is implemented as part of the
+ * FDB. The MID record maps from a MID, which is a unique identifier of the
+ * multicast group within the stacking domain, into a list of local ports into
+ * which the packet is replicated.
+ */
+#define MLXSW_REG_SMID_ID 0x2007
+#define MLXSW_REG_SMID_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_smid = {
+       .id = MLXSW_REG_SMID_ID,
+       .len = MLXSW_REG_SMID_LEN,
+};
+
+/* reg_smid_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
+
+/* reg_smid_mid
+ * Multicast identifier - global identifier that represents the multicast group
+ * across all devices
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
+
+/* reg_smid_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
+
+/* reg_smid_port_mask
+ * Local port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
+{
+       MLXSW_REG_ZERO(smid, payload);
+       mlxsw_reg_smid_swid_set(payload, 0);
+       mlxsw_reg_smid_mid_set(payload, mid);
+       mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+       mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
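+/* Typical use (sketch; assumes the core's mlxsw_reg_write() helper):
+ *
+ *     char smid_pl[MLXSW_REG_SMID_LEN];
+ *
+ *     mlxsw_reg_smid_pack(smid_pl, mid);
+ *     err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(smid), smid_pl);
+ */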
+
+/* SPMS - Switch Port MSTP/RSTP State Register
+ * -------------------------------------------
+ * Configures the spanning tree state of a physical port.
+ */
+#define MLXSW_REG_SPMS_ID 0x200d
+#define MLXSW_REG_SPMS_LEN 0x404
+
+static const struct mlxsw_reg_info mlxsw_reg_spms = {
+       .id = MLXSW_REG_SPMS_ID,
+       .len = MLXSW_REG_SPMS_LEN,
+};
+
+/* reg_spms_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spms, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_spms_state {
+       MLXSW_REG_SPMS_STATE_NO_CHANGE,
+       MLXSW_REG_SPMS_STATE_DISCARDING,
+       MLXSW_REG_SPMS_STATE_LEARNING,
+       MLXSW_REG_SPMS_STATE_FORWARDING,
+};
+
+/* reg_spms_state
+ * Spanning tree state of each VLAN ID (VID) of the local port.
+ * 0 - Do not change spanning tree state (used only when writing).
+ * 1 - Discarding. No learning or forwarding to/from this port (default).
+ * 2 - Learning. Port is learning, but not forwarding.
+ * 3 - Forwarding. Port is learning and forwarding.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
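+/* At two bits per entry, the 0x400-byte array holds 0x400 * 8 / 2 = 4096
+ * states, one per possible VID.
+ */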
+
+static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
+                                      enum mlxsw_reg_spms_state state)
+{
+       MLXSW_REG_ZERO(spms, payload);
+       mlxsw_reg_spms_local_port_set(payload, local_port);
+       mlxsw_reg_spms_state_set(payload, vid, state);
+}
+
+/* SFGC - Switch Flooding Group Configuration
+ * ------------------------------------------
+ * The following register controls the association of flooding tables and MIDs
+ * to packet types used for flooding.
+ */
+#define MLXSW_REG_SFGC_ID  0x2011
+#define MLXSW_REG_SFGC_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
+       .id = MLXSW_REG_SFGC_ID,
+       .len = MLXSW_REG_SFGC_LEN,
+};
+
+enum mlxsw_reg_sfgc_type {
+       MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
+       MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
+       MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
+       MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
+       MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
+};
+
+/* reg_sfgc_type
+ * The traffic type to reach the flooding table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfgc, type, 0x00, 0, 4);
+
+enum mlxsw_reg_sfgc_bridge_type {
+       MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID = 0,
+       MLXSW_REG_SFGC_BRIDGE_TYPE_VFID = 1,
+};
+
+/* reg_sfgc_bridge_type
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports 802.1Q mode.
+ */
+MLXSW_ITEM32(reg, sfgc, bridge_type, 0x04, 24, 3);
+
+enum mlxsw_flood_table_type {
+       MLXSW_REG_SFGC_TABLE_TYPE_VID = 1,
+       MLXSW_REG_SFGC_TABLE_TYPE_SINGLE = 2,
+       MLXSW_REG_SFGC_TABLE_TYPE_ANY = 0,
+       MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST = 3,
+       MLXSW_REG_SFGC_TABLE_TYPE_FID = 4,
+};
+
+/* reg_sfgc_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ *
+ * Note: FID offset and FID types are not supported in SwitchX-2.
+ */
+MLXSW_ITEM32(reg, sfgc, table_type, 0x04, 16, 3);
+
+/* reg_sfgc_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, flood_table, 0x04, 0, 6);
+
+/* reg_sfgc_mid
+ * The multicast ID for the swid. Not supported for Spectrum
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, mid, 0x08, 0, 16);
+
+/* reg_sfgc_counter_set_type
+ * Counter Set Type for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_set_type, 0x0C, 24, 8);
+
+/* reg_sfgc_counter_index
+ * Counter Index for flow counters.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfgc, counter_index, 0x0C, 0, 24);
+
+static inline void
+mlxsw_reg_sfgc_pack(char *payload, enum mlxsw_reg_sfgc_type type,
+                   enum mlxsw_reg_sfgc_bridge_type bridge_type,
+                   enum mlxsw_flood_table_type table_type,
+                   unsigned int flood_table)
+{
+       MLXSW_REG_ZERO(sfgc, payload);
+       mlxsw_reg_sfgc_type_set(payload, type);
+       mlxsw_reg_sfgc_bridge_type_set(payload, bridge_type);
+       mlxsw_reg_sfgc_table_type_set(payload, table_type);
+       mlxsw_reg_sfgc_flood_table_set(payload, flood_table);
+       mlxsw_reg_sfgc_mid_set(payload, MLXSW_PORT_MID);
+}
+
+/* SFTR - Switch Flooding Table Register
+ * -------------------------------------
+ * The switch flooding table is used for flooding packet replication. The table
+ * defines a bit mask of ports for packet replication.
+ */
+#define MLXSW_REG_SFTR_ID 0x2012
+#define MLXSW_REG_SFTR_LEN 0x420
+
+static const struct mlxsw_reg_info mlxsw_reg_sftr = {
+       .id = MLXSW_REG_SFTR_ID,
+       .len = MLXSW_REG_SFTR_LEN,
+};
+
+/* reg_sftr_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, swid, 0x00, 24, 8);
+
+/* reg_sftr_flood_table
+ * Flooding table index to associate with the specific type on the specific
+ * switch partition.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, flood_table, 0x00, 16, 6);
+
+/* reg_sftr_index
+ * Index. Used as an index into the Flooding Table in case the table is
+ * configured to use VID / FID or FID Offset.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, index, 0x00, 0, 16);
+
+/* reg_sftr_table_type
+ * See mlxsw_flood_table_type
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sftr, table_type, 0x04, 16, 3);
+
+/* reg_sftr_range
+ * Range of entries to update
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sftr, range, 0x04, 0, 16);
+
+/* reg_sftr_port
+ * Local port membership (1 bit per port).
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port, 0x20, 0x20, 1);
+
+/* reg_sftr_port_mask
+ * CPU port mask (1 bit per port).
+ * Access: W
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sftr, port_mask, 0x220, 0x20, 1);
+
+static inline void mlxsw_reg_sftr_pack(char *payload,
+                                      unsigned int flood_table,
+                                      unsigned int index,
+                                      enum mlxsw_flood_table_type table_type,
+                                      unsigned int range)
+{
+       MLXSW_REG_ZERO(sftr, payload);
+       mlxsw_reg_sftr_swid_set(payload, 0);
+       mlxsw_reg_sftr_flood_table_set(payload, flood_table);
+       mlxsw_reg_sftr_index_set(payload, index);
+       mlxsw_reg_sftr_table_type_set(payload, table_type);
+       mlxsw_reg_sftr_range_set(payload, range);
+       mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
+       mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
+}
+
+/* SPMLR - Switch Port MAC Learning Register
+ * -----------------------------------------
+ * Controls the Switch MAC learning policy per port.
+ */
+#define MLXSW_REG_SPMLR_ID 0x2018
+#define MLXSW_REG_SPMLR_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_spmlr = {
+       .id = MLXSW_REG_SPMLR_ID,
+       .len = MLXSW_REG_SPMLR_LEN,
+};
+
+/* reg_spmlr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, local_port, 0x00, 16, 8);
+
+/* reg_spmlr_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, spmlr, sub_port, 0x00, 8, 8);
+
+enum mlxsw_reg_spmlr_learn_mode {
+       MLXSW_REG_SPMLR_LEARN_MODE_DISABLE = 0,
+       MLXSW_REG_SPMLR_LEARN_MODE_ENABLE = 2,
+       MLXSW_REG_SPMLR_LEARN_MODE_SEC = 3,
+};
+
+/* reg_spmlr_learn_mode
+ * Learning mode on the port.
+ * 0 - Learning disabled.
+ * 2 - Learning enabled.
+ * 3 - Security mode.
+ *
+ * In security mode the switch does not learn MACs on the port, but uses the
+ * SMAC to see if it exists on another ingress port. If so, the packet is
+ * classified as a bad packet and is discarded unless the software registers
+ * to receive port security error packets using HPKT.
+ */
+MLXSW_ITEM32(reg, spmlr, learn_mode, 0x04, 30, 2);
+
+static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port,
+                                       enum mlxsw_reg_spmlr_learn_mode mode)
+{
+       MLXSW_REG_ZERO(spmlr, payload);
+       mlxsw_reg_spmlr_local_port_set(payload, local_port);
+       mlxsw_reg_spmlr_sub_port_set(payload, 0);
+       mlxsw_reg_spmlr_learn_mode_set(payload, mode);
+}
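+
+/* Example usage (illustrative sketch, not part of the original patch):
+ * putting a port into the security learn mode described above, assuming
+ * mlxsw_reg_write() from core.h:
+ *
+ *     char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+ *
+ *     mlxsw_reg_spmlr_pack(spmlr_pl, local_port,
+ *                          MLXSW_REG_SPMLR_LEARN_MODE_SEC);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(spmlr), spmlr_pl);
+ */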
+
+/* PMLP - Ports Module to Local Port Register
+ * ------------------------------------------
+ * Configures the assignment of modules to local ports.
+ */
+#define MLXSW_REG_PMLP_ID 0x5002
+#define MLXSW_REG_PMLP_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_pmlp = {
+       .id = MLXSW_REG_PMLP_ID,
+       .len = MLXSW_REG_PMLP_LEN,
+};
+
+/* reg_pmlp_rxtx
+ * 0 - Tx value is used for both Tx and Rx.
+ * 1 - Rx value is taken from a separate field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, rxtx, 0x00, 31, 1);
+
+/* reg_pmlp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmlp, local_port, 0x00, 16, 8);
+
+/* reg_pmlp_width
+ * 0 - Unmap local port.
+ * 1 - Lane 0 is used.
+ * 2 - Lanes 0 and 1 are used.
+ * 4 - Lanes 0, 1, 2 and 3 are used.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
+
+/* reg_pmlp_module
+ * Module number.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+
+/* reg_pmlp_tx_lane
+ * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+
+/* reg_pmlp_rx_lane
+ * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
+ * equal to Tx lane.
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+
+static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
+{
+       MLXSW_REG_ZERO(pmlp, payload);
+       mlxsw_reg_pmlp_local_port_set(payload, local_port);
+}
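+
+/* Example usage (illustrative sketch, not part of the original patch): PMLP
+ * is queried to learn whether a module is wired to the port; a width of 0
+ * means the port is unmapped. This mirrors mlxsw_sx_port_module_check() in
+ * switchx2.c; mlxsw_reg_query() is declared in core.h.
+ *
+ *     char pmlp_pl[MLXSW_REG_PMLP_LEN];
+ *
+ *     mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
+ *     err = mlxsw_reg_query(core, MLXSW_REG(pmlp), pmlp_pl);
+ *     usable = mlxsw_reg_pmlp_width_get(pmlp_pl) != 0;
+ */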
+
+/* PMTU - Port MTU Register
+ * ------------------------
+ * Configures and reports the port MTU.
+ */
+#define MLXSW_REG_PMTU_ID 0x5003
+#define MLXSW_REG_PMTU_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pmtu = {
+       .id = MLXSW_REG_PMTU_ID,
+       .len = MLXSW_REG_PMTU_LEN,
+};
+
+/* reg_pmtu_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtu, local_port, 0x00, 16, 8);
+
+/* reg_pmtu_max_mtu
+ * Maximum MTU.
+ * When port type (e.g. Ethernet) is configured, the relevant MTU is
+ * reported, otherwise the minimum between the max_mtu of the different
+ * types is reported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, max_mtu, 0x04, 16, 16);
+
+/* reg_pmtu_admin_mtu
+ * MTU value to set the port to. Must be smaller than or equal to max_mtu.
+ * Note: If the port type is Infiniband, the port must be disabled when its
+ * MTU is set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmtu, admin_mtu, 0x08, 16, 16);
+
+/* reg_pmtu_oper_mtu
+ * The actual MTU configured on the port. Packets exceeding this size
+ * will be dropped.
+ * Note: In Ethernet and FC, oper_mtu == admin_mtu; in Infiniband, however,
+ * oper_mtu might be smaller than admin_mtu.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtu, oper_mtu, 0x0C, 16, 16);
+
+static inline void mlxsw_reg_pmtu_pack(char *payload, u8 local_port,
+                                      u16 new_mtu)
+{
+       MLXSW_REG_ZERO(pmtu, payload);
+       mlxsw_reg_pmtu_local_port_set(payload, local_port);
+       mlxsw_reg_pmtu_max_mtu_set(payload, 0);
+       mlxsw_reg_pmtu_admin_mtu_set(payload, new_mtu);
+       mlxsw_reg_pmtu_oper_mtu_set(payload, 0);
+}
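+
+/* Example usage (illustrative sketch, not part of the original patch):
+ * since admin_mtu must not exceed the read-only max_mtu, changing the MTU
+ * is a query followed by a write, the same pattern used by
+ * mlxsw_sx_port_mtu_set() in switchx2.c:
+ *
+ *     char pmtu_pl[MLXSW_REG_PMTU_LEN];
+ *
+ *     mlxsw_reg_pmtu_pack(pmtu_pl, local_port, 0);
+ *     err = mlxsw_reg_query(core, MLXSW_REG(pmtu), pmtu_pl);
+ *     if (!err && new_mtu <= mlxsw_reg_pmtu_max_mtu_get(pmtu_pl)) {
+ *             mlxsw_reg_pmtu_pack(pmtu_pl, local_port, new_mtu);
+ *             err = mlxsw_reg_write(core, MLXSW_REG(pmtu), pmtu_pl);
+ *     }
+ */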
+
+/* PTYS - Port Type and Speed Register
+ * -----------------------------------
+ * Configures and reports the port speed type.
+ *
+ * Note: When set while the link is up, the changes will not take effect
+ * until the port transitions from down to up state.
+ */
+#define MLXSW_REG_PTYS_ID 0x5004
+#define MLXSW_REG_PTYS_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ptys = {
+       .id = MLXSW_REG_PTYS_ID,
+       .len = MLXSW_REG_PTYS_LEN,
+};
+
+/* reg_ptys_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, local_port, 0x00, 16, 8);
+
+#define MLXSW_REG_PTYS_PROTO_MASK_ETH  BIT(2)
+
+/* reg_ptys_proto_mask
+ * Protocol mask. Indicates which protocol is used.
+ * 0 - Infiniband.
+ * 1 - Fibre Channel.
+ * 2 - Ethernet.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptys, proto_mask, 0x00, 0, 3);
+
+#define MLXSW_REG_PTYS_ETH_SPEED_SGMII                 BIT(0)
+#define MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX           BIT(1)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4           BIT(2)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4           BIT(3)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR            BIT(4)
+#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2           BIT(5)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4           BIT(6)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4           BIT(7)
+#define MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4            BIT(8)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR            BIT(12)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR            BIT(13)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR         BIT(14)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4           BIT(15)
+#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4       BIT(16)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4           BIT(19)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4          BIT(20)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4          BIT(21)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4          BIT(22)
+#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4      BIT(23)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX            BIT(24)
+#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T             BIT(25)
+#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T             BIT(26)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR            BIT(27)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR            BIT(28)
+#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR            BIT(29)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2           BIT(30)
+#define MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2           BIT(31)
+
+/* reg_ptys_eth_proto_cap
+ * Ethernet port supported speeds and protocols.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_cap, 0x0C, 0, 32);
+
+/* reg_ptys_eth_proto_admin
+ * Speed and protocol to set port to.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_admin, 0x18, 0, 32);
+
+/* reg_ptys_eth_proto_oper
+ * The current speed and protocol configured for the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptys, eth_proto_oper, 0x24, 0, 32);
+
+static inline void mlxsw_reg_ptys_pack(char *payload, u8 local_port,
+                                      u32 proto_admin)
+{
+       MLXSW_REG_ZERO(ptys, payload);
+       mlxsw_reg_ptys_local_port_set(payload, local_port);
+       mlxsw_reg_ptys_proto_mask_set(payload, MLXSW_REG_PTYS_PROTO_MASK_ETH);
+       mlxsw_reg_ptys_eth_proto_admin_set(payload, proto_admin);
+}
+
+static inline void mlxsw_reg_ptys_unpack(char *payload, u32 *p_eth_proto_cap,
+                                        u32 *p_eth_proto_adm,
+                                        u32 *p_eth_proto_oper)
+{
+       if (p_eth_proto_cap)
+               *p_eth_proto_cap = mlxsw_reg_ptys_eth_proto_cap_get(payload);
+       if (p_eth_proto_adm)
+               *p_eth_proto_adm = mlxsw_reg_ptys_eth_proto_admin_get(payload);
+       if (p_eth_proto_oper)
+               *p_eth_proto_oper = mlxsw_reg_ptys_eth_proto_oper_get(payload);
+}
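+
+/* Example usage (illustrative sketch, not part of the original patch):
+ * reading the capability mask and then restricting the administratively
+ * enabled speeds to a supported subset. Per the note above, the new value
+ * only takes effect on the next down/up transition of the port.
+ *
+ *     char ptys_pl[MLXSW_REG_PTYS_LEN];
+ *     u32 eth_proto_cap;
+ *
+ *     mlxsw_reg_ptys_pack(ptys_pl, local_port, 0);
+ *     err = mlxsw_reg_query(core, MLXSW_REG(ptys), ptys_pl);
+ *     mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
+ *     mlxsw_reg_ptys_pack(ptys_pl, local_port, eth_proto_cap &
+ *                         MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(ptys), ptys_pl);
+ */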
+
+/* PPAD - Port Physical Address Register
+ * -------------------------------------
+ * The PPAD register configures the per port physical MAC address.
+ */
+#define MLXSW_REG_PPAD_ID 0x5005
+#define MLXSW_REG_PPAD_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_ppad = {
+       .id = MLXSW_REG_PPAD_ID,
+       .len = MLXSW_REG_PPAD_LEN,
+};
+
+/* reg_ppad_single_base_mac
+ * 0 - base_mac: local_port should be 0 and mac[7:0] is reserved.
+ *     HW will assign the MAC addresses incrementally from the base.
+ * 1 - single_mac: the MAC of the local_port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, single_base_mac, 0x00, 28, 1);
+
+/* reg_ppad_local_port
+ * Port number. If single_base_mac = 0, then local_port is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppad, local_port, 0x00, 16, 8);
+
+/* reg_ppad_mac
+ * If single_base_mac = 0 - base MAC address, mac[7:0] is reserved.
+ * If single_base_mac = 1 - the per port MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ppad, mac, 0x02, 6);
+
+static inline void mlxsw_reg_ppad_pack(char *payload, bool single_base_mac,
+                                      u8 local_port)
+{
+       MLXSW_REG_ZERO(ppad, payload);
+       mlxsw_reg_ppad_single_base_mac_set(payload, !!single_base_mac);
+       mlxsw_reg_ppad_local_port_set(payload, local_port);
+}
+
+/* PAOS - Ports Administrative and Operational Status Register
+ * -----------------------------------------------------------
+ * Configures and retrieves per port administrative and operational status.
+ */
+#define MLXSW_REG_PAOS_ID 0x5006
+#define MLXSW_REG_PAOS_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_paos = {
+       .id = MLXSW_REG_PAOS_ID,
+       .len = MLXSW_REG_PAOS_LEN,
+};
+
+/* reg_paos_swid
+ * Switch partition ID with which to associate the port.
+ * Note: while external ports use unique local port numbers (and thus swid is
+ * redundant), router ports use the same local port number where swid is the
+ * only indication for the relevant port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, swid, 0x00, 24, 8);
+
+/* reg_paos_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, paos, local_port, 0x00, 16, 8);
+
+/* reg_paos_admin_status
+ * Port administrative state (the desired state of the port):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, admin_status, 0x00, 8, 4);
+
+/* reg_paos_oper_status
+ * Port operational state (the current state):
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, paos, oper_status, 0x00, 0, 4);
+
+/* reg_paos_ase
+ * Admin state update enabled.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ase, 0x04, 31, 1);
+
+/* reg_paos_ee
+ * Event update enable. If this bit is set, event generation will be
+ * updated based on the e field.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, paos, ee, 0x04, 30, 1);
+
+/* reg_paos_e
+ * Event generation on operational state change:
+ * 0 - Do not generate event.
+ * 1 - Generate Event.
+ * 2 - Generate Single Event.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, paos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
+                                      enum mlxsw_port_admin_status status)
+{
+       MLXSW_REG_ZERO(paos, payload);
+       mlxsw_reg_paos_swid_set(payload, 0);
+       mlxsw_reg_paos_local_port_set(payload, local_port);
+       mlxsw_reg_paos_admin_status_set(payload, status);
+       mlxsw_reg_paos_oper_status_set(payload, 0);
+       mlxsw_reg_paos_ase_set(payload, 1);
+       mlxsw_reg_paos_ee_set(payload, 1);
+       mlxsw_reg_paos_e_set(payload, 1);
+}
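+
+/* Example usage (illustrative sketch, not part of the original patch): the
+ * pack helper above always sets ase and ee, so a single write updates both
+ * the admin status and event generation, as mlxsw_sx_port_admin_status_set()
+ * in switchx2.c does:
+ *
+ *     char paos_pl[MLXSW_REG_PAOS_LEN];
+ *
+ *     mlxsw_reg_paos_pack(paos_pl, local_port, MLXSW_PORT_ADMIN_STATUS_UP);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(paos), paos_pl);
+ */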
+
+/* PPCNT - Ports Performance Counters Register
+ * -------------------------------------------
+ * The PPCNT register retrieves per port performance counters.
+ */
+#define MLXSW_REG_PPCNT_ID 0x5008
+#define MLXSW_REG_PPCNT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_ppcnt = {
+       .id = MLXSW_REG_PPCNT_ID,
+       .len = MLXSW_REG_PPCNT_LEN,
+};
+
+/* reg_ppcnt_swid
+ * For HCA: must always be 0.
+ * Switch partition ID to associate port with.
+ * Switch partitions are numbered from 0 to 7, inclusive.
+ * Switch partition 254 indicates stacking ports.
+ * Switch partition 255 indicates all switch partitions.
+ * Only valid on Set() operation with local_port=255.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, swid, 0x00, 24, 8);
+
+/* reg_ppcnt_local_port
+ * Local port number.
+ * 255 indicates all ports on the device, and is only allowed
+ * for Set() operation.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
+
+/* reg_ppcnt_pnat
+ * Port number access type:
+ * 0 - Local port number
+ * 1 - IB port number
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+
+/* reg_ppcnt_grp
+ * Performance counter group.
+ * Group 63 indicates all groups. Only valid on Set() operation with
+ * clr bit set.
+ * 0x0: IEEE 802.3 Counters
+ * 0x1: RFC 2863 Counters
+ * 0x2: RFC 2819 Counters
+ * 0x3: RFC 3635 Counters
+ * 0x5: Ethernet Extended Counters
+ * 0x8: Link Level Retransmission Counters
+ * 0x10: Per Priority Counters
+ * 0x11: Per Traffic Class Counters
+ * 0x12: Physical Layer Counters
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6);
+
+/* reg_ppcnt_clr
+ * Clear counters. Setting the clr bit will reset the counter value
+ * for all counters in the counter group. This bit can be set
+ * for both Set() and Get() operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
+
+/* reg_ppcnt_prio_tc
+ * Priority for counter sets that support per-priority counting.
+ * Valid values: 0-7.
+ * Traffic class for counter sets that support per-traffic-class counting.
+ * Valid values: 0 to cap_max_tclass - 1.
+ * For HCA: cap_max_tclass is always 8.
+ * Otherwise must be 0.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+
+/* reg_ppcnt_a_frames_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok,
+            0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_a_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok,
+            0x08 + 0x08, 0, 64);
+
+/* reg_ppcnt_a_frame_check_sequence_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors,
+            0x08 + 0x10, 0, 64);
+
+/* reg_ppcnt_a_alignment_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_alignment_errors,
+            0x08 + 0x18, 0, 64);
+
+/* reg_ppcnt_a_octets_transmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok,
+            0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_a_octets_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok,
+            0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok,
+            0x08 + 0x30, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_xmitted_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok,
+            0x08 + 0x38, 0, 64);
+
+/* reg_ppcnt_a_multicast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok,
+            0x08 + 0x40, 0, 64);
+
+/* reg_ppcnt_a_broadcast_frames_received_ok
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok,
+            0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_a_in_range_length_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors,
+            0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_a_out_of_range_length_field
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field,
+            0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_a_frame_too_long_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors,
+            0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_a_symbol_error_during_carrier
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier,
+            0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted,
+            0x08 + 0x70, 0, 64);
+
+/* reg_ppcnt_a_mac_control_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_received,
+            0x08 + 0x78, 0, 64);
+
+/* reg_ppcnt_a_unsupported_opcodes_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received,
+            0x08 + 0x80, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_received
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
+            0x08 + 0x88, 0, 64);
+
+/* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
+            0x08 + 0x90, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+{
+       MLXSW_REG_ZERO(ppcnt, payload);
+       mlxsw_reg_ppcnt_swid_set(payload, 0);
+       mlxsw_reg_ppcnt_local_port_set(payload, local_port);
+       mlxsw_reg_ppcnt_pnat_set(payload, 0);
+       mlxsw_reg_ppcnt_grp_set(payload, 0);
+       mlxsw_reg_ppcnt_clr_set(payload, 0);
+       mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+}
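+
+/* Example usage (illustrative sketch, not part of the original patch): the
+ * pack helper above selects group 0 (IEEE 802.3); setting the clr bit makes
+ * the same query also reset the counters of that group:
+ *
+ *     char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *     u64 tx_ok;
+ *
+ *     mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port);
+ *     mlxsw_reg_ppcnt_clr_set(ppcnt_pl, 1);
+ *     err = mlxsw_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *     tx_ok = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
+ */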
+
+/* PSPA - Port Switch Partition Allocation
+ * ---------------------------------------
+ * Controls the association of a port with a switch partition and enables
+ * configuring ports as stacking ports.
+ */
+#define MLXSW_REG_PSPA_ID 0x500d
+#define MLXSW_REG_PSPA_LEN 0x8
+
+static const struct mlxsw_reg_info mlxsw_reg_pspa = {
+       .id = MLXSW_REG_PSPA_ID,
+       .len = MLXSW_REG_PSPA_LEN,
+};
+
+/* reg_pspa_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pspa, swid, 0x00, 24, 8);
+
+/* reg_pspa_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, local_port, 0x00, 16, 8);
+
+/* reg_pspa_sub_port
+ * Virtual port within the local port. Set to 0 when virtual ports are
+ * disabled on the local port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pspa, sub_port, 0x00, 8, 8);
+
+static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
+{
+       MLXSW_REG_ZERO(pspa, payload);
+       mlxsw_reg_pspa_swid_set(payload, swid);
+       mlxsw_reg_pspa_local_port_set(payload, local_port);
+       mlxsw_reg_pspa_sub_port_set(payload, 0);
+}
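+
+/* Example usage (illustrative sketch, not part of the original patch):
+ * moving a port to a different switch partition, as done during port init
+ * by mlxsw_sx_port_swid_set() in switchx2.c:
+ *
+ *     char pspa_pl[MLXSW_REG_PSPA_LEN];
+ *
+ *     mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(pspa), pspa_pl);
+ */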
+
+/* HTGT - Host Trap Group Table
+ * ----------------------------
+ * Configures the properties for forwarding to CPU.
+ */
+#define MLXSW_REG_HTGT_ID 0x7002
+#define MLXSW_REG_HTGT_LEN 0x100
+
+static const struct mlxsw_reg_info mlxsw_reg_htgt = {
+       .id = MLXSW_REG_HTGT_ID,
+       .len = MLXSW_REG_HTGT_LEN,
+};
+
+/* reg_htgt_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
+
+#define MLXSW_REG_HTGT_PATH_TYPE_LOCAL 0x0     /* For locally attached CPU */
+
+/* reg_htgt_type
+ * CPU path type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
+
+#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
+#define MLXSW_REG_HTGT_TRAP_GROUP_RX   0x1
+
+/* reg_htgt_trap_group
+ * Trap group number. User-defined number specifying which trap groups
+ * should be forwarded to the CPU. The mapping between trap IDs and trap
+ * groups is configured using the HPKT register.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, htgt, trap_group, 0x00, 0, 8);
+
+enum {
+       MLXSW_REG_HTGT_POLICER_DISABLE,
+       MLXSW_REG_HTGT_POLICER_ENABLE,
+};
+
+/* reg_htgt_pide
+ * Enable policer ID specified using 'pid' field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pide, 0x04, 15, 1);
+
+/* reg_htgt_pid
+ * Policer ID for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, pid, 0x04, 0, 8);
+
+#define MLXSW_REG_HTGT_TRAP_TO_CPU 0x0
+
+/* reg_htgt_mirror_action
+ * Mirror action to use.
+ * 0 - Trap to CPU.
+ * 1 - Trap to CPU and mirror to a mirroring agent.
+ * 2 - Mirror to a mirroring agent and do not trap to CPU.
+ * Access: RW
+ *
+ * Note: Mirroring to a mirroring agent is only supported in Spectrum.
+ */
+MLXSW_ITEM32(reg, htgt, mirror_action, 0x08, 8, 2);
+
+/* reg_htgt_mirroring_agent
+ * Mirroring agent.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, mirroring_agent, 0x08, 0, 3);
+
+/* reg_htgt_priority
+ * Trap group priority.
+ * In case a packet matches multiple classification rules, the packet will
+ * only be trapped once, based on the trap ID associated with the group (via
+ * register HPKT) with the highest priority.
+ * Supported values are 0-7, with 7 representing the highest priority.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field is ignored and the priority value is replaced
+ * by the 'trap_group' field.
+ */
+MLXSW_ITEM32(reg, htgt, priority, 0x0C, 0, 4);
+
+/* reg_htgt_local_path_cpu_tclass
+ * CPU ingress traffic class for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
+
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD     0x15
+#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX       0x14
+
+/* reg_htgt_local_path_rdq
+ * Receive descriptor queue (RDQ) to use for the trap group.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
+
+static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
+{
+       u8 swid, rdq;
+
+       MLXSW_REG_ZERO(htgt, payload);
+       if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
+               swid = MLXSW_PORT_SWID_ALL_SWIDS;
+               rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
+       } else {
+               swid = 0;
+               rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
+       }
+       mlxsw_reg_htgt_swid_set(payload, swid);
+       mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
+       mlxsw_reg_htgt_trap_group_set(payload, trap_group);
+       mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
+       mlxsw_reg_htgt_pid_set(payload, 0);
+       mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
+       mlxsw_reg_htgt_mirroring_agent_set(payload, 0);
+       mlxsw_reg_htgt_priority_set(payload, 0);
+       mlxsw_reg_htgt_local_path_cpu_tclass_set(payload, 7);
+       mlxsw_reg_htgt_local_path_rdq_set(payload, rdq);
+}
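+
+/* Example usage (illustrative sketch, not part of the original patch): the
+ * two trap groups defined above are configured once at init time; the pack
+ * helper routes the EMAD group to its dedicated RDQ on all swids and the RX
+ * group to the regular receive RDQ:
+ *
+ *     char htgt_pl[MLXSW_REG_HTGT_LEN];
+ *
+ *     mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(htgt), htgt_pl);
+ */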
+
+/* HPKT - Host Packet Trap
+ * -----------------------
+ * Configures trap IDs inside trap groups.
+ */
+#define MLXSW_REG_HPKT_ID 0x7003
+#define MLXSW_REG_HPKT_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_hpkt = {
+       .id = MLXSW_REG_HPKT_ID,
+       .len = MLXSW_REG_HPKT_LEN,
+};
+
+enum {
+       MLXSW_REG_HPKT_ACK_NOT_REQUIRED,
+       MLXSW_REG_HPKT_ACK_REQUIRED,
+};
+
+/* reg_hpkt_ack
+ * Require acknowledgements from the host for events.
+ * If set, then the device will wait for the event it sent to be acknowledged
+ * by the host. This option is only relevant for event trap IDs.
+ * Access: RW
+ *
+ * Note: Currently not supported by firmware.
+ */
+MLXSW_ITEM32(reg, hpkt, ack, 0x00, 24, 1);
+
+enum mlxsw_reg_hpkt_action {
+       MLXSW_REG_HPKT_ACTION_FORWARD,
+       MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+       MLXSW_REG_HPKT_ACTION_MIRROR_TO_CPU,
+       MLXSW_REG_HPKT_ACTION_DISCARD,
+       MLXSW_REG_HPKT_ACTION_SOFT_DISCARD,
+       MLXSW_REG_HPKT_ACTION_TRAP_AND_SOFT_DISCARD,
+};
+
+/* reg_hpkt_action
+ * Action to perform on packet when trapped.
+ * 0 - No action. Forward to CPU based on switching rules.
+ * 1 - Trap to CPU (CPU receives sole copy).
+ * 2 - Mirror to CPU (CPU receives a replica of the packet).
+ * 3 - Discard.
+ * 4 - Soft discard (allow other traps to act on the packet).
+ * 5 - Trap and soft discard (allow other traps to overwrite this trap).
+ * Access: RW
+ *
+ * Note: Must be set to 0 (forward) for event trap IDs, as they are already
+ * addressed to the CPU.
+ */
+MLXSW_ITEM32(reg, hpkt, action, 0x00, 20, 3);
+
+/* reg_hpkt_trap_group
+ * Trap group to associate the trap with.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
+
+/* reg_hpkt_trap_id
+ * Trap ID.
+ * Access: Index
+ *
+ * Note: A trap ID can only be associated with a single trap group. The device
+ * will associate the trap ID with the last trap group configured.
+ */
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+
+enum {
+       MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
+       MLXSW_REG_HPKT_CTRL_PACKET_NO_BUFFER,
+       MLXSW_REG_HPKT_CTRL_PACKET_USE_BUFFER,
+};
+
+/* reg_hpkt_ctrl
+ * Configure dedicated buffer resources for control packets.
+ * 0 - Keep factory defaults.
+ * 1 - Do not use control buffer for this trap ID.
+ * 2 - Use control buffer for this trap ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
+
+static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
+                                      u8 trap_group, u16 trap_id)
+{
+       MLXSW_REG_ZERO(hpkt, payload);
+       mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
+       mlxsw_reg_hpkt_action_set(payload, action);
+       mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
+       mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
+       mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
+}
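+
+/* Example usage (illustrative sketch, not part of the original patch):
+ * binding a trap ID to the RX trap group so matching packets are trapped
+ * to the CPU; MLXSW_TRAP_ID_LACP is assumed here to be one of the trap IDs
+ * from trap.h:
+ *
+ *     char hpkt_pl[MLXSW_REG_HPKT_LEN];
+ *
+ *     mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+ *                         MLXSW_REG_HTGT_TRAP_GROUP_RX, MLXSW_TRAP_ID_LACP);
+ *     err = mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
+ */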
+
+static inline const char *mlxsw_reg_id_str(u16 reg_id)
+{
+       switch (reg_id) {
+       case MLXSW_REG_SGCR_ID:
+               return "SGCR";
+       case MLXSW_REG_SPAD_ID:
+               return "SPAD";
+       case MLXSW_REG_SMID_ID:
+               return "SMID";
+       case MLXSW_REG_SPMS_ID:
+               return "SPMS";
+       case MLXSW_REG_SFGC_ID:
+               return "SFGC";
+       case MLXSW_REG_SFTR_ID:
+               return "SFTR";
+       case MLXSW_REG_SPMLR_ID:
+               return "SPMLR";
+       case MLXSW_REG_PMLP_ID:
+               return "PMLP";
+       case MLXSW_REG_PMTU_ID:
+               return "PMTU";
+       case MLXSW_REG_PTYS_ID:
+               return "PTYS";
+       case MLXSW_REG_PPAD_ID:
+               return "PPAD";
+       case MLXSW_REG_PAOS_ID:
+               return "PAOS";
+       case MLXSW_REG_PPCNT_ID:
+               return "PPCNT";
+       case MLXSW_REG_PSPA_ID:
+               return "PSPA";
+       case MLXSW_REG_HTGT_ID:
+               return "HTGT";
+       case MLXSW_REG_HPKT_ID:
+               return "HPKT";
+       default:
+               return "*UNKNOWN*";
+       }
+}
+
+/* PUDE - Port Up / Down Event
+ * ---------------------------
+ * Reports the operational state change of a port.
+ */
+#define MLXSW_REG_PUDE_LEN 0x10
+
+/* reg_pude_swid
+ * Switch partition ID with which to associate the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, swid, 0x00, 24, 8);
+
+/* reg_pude_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pude, local_port, 0x00, 16, 8);
+
+/* reg_pude_admin_status
+ * Port administrative state (the desired state).
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Up once. This means that in case of link failure, the port won't go
+ *     into polling mode, but will wait to be re-enabled by software.
+ * 4 - Disabled by system. Can only be set by hardware.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, admin_status, 0x00, 8, 4);
+
+/* reg_pude_oper_status
+ * Port operational state.
+ * 1 - Up.
+ * 2 - Down.
+ * 3 - Down by port failure. This means that the device will not let the
+ *     port up again until explicitly specified by software.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pude, oper_status, 0x00, 0, 4);
+
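+/* Example usage (illustrative sketch, not part of the original patch): PUDE
+ * is delivered through the event mechanism rather than queried; a handler
+ * would decode the payload with the accessors generated above, mirroring
+ * the PAOS oper_status check:
+ *
+ *     u8 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+ *     bool is_up = mlxsw_reg_pude_oper_status_get(pude_pl) ==
+ *                  MLXSW_PORT_ADMIN_STATUS_UP;
+ */
+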
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
new file mode 100644 (file)
index 0000000..29b46ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -0,0 +1,1552 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sx_driver_name[] = "mlxsw_switchx2";
+static const char mlxsw_sx_driver_version[] = "1.0";
+
+struct mlxsw_sx_port;
+
+#define MLXSW_SW_HW_ID_LEN 6
+
+struct mlxsw_sx {
+       struct mlxsw_sx_port **ports;
+       struct mlxsw_core *core;
+       const struct mlxsw_bus_info *bus_info;
+       u8 hw_id[MLXSW_SW_HW_ID_LEN];
+};
+
+struct mlxsw_sx_port_pcpu_stats {
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       struct u64_stats_sync   syncp;
+       u32                     tx_dropped;
+};
+
+struct mlxsw_sx_port {
+       struct net_device *dev;
+       struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
+       struct mlxsw_sx *mlxsw_sx;
+       u8 local_port;
+};
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ * The MSB is specified in the 'ctclass3' field.
+ * Range is 0-15, where 15 is the highest priority.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 18, 3);
+
+/* tx_hdr_swid
+ * Switch partition ID.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_ctclass3
+ * See field 'etclass'.
+ */
+MLXSW_ITEM32(tx, hdr, ctclass3, 0x04, 14, 1);
+
+/* tx_hdr_rdq
+ * RDQ for control packets sent to remote CPU.
+ * Must be set to 0x1F for EMADs, otherwise 0.
+ */
+MLXSW_ITEM32(tx, hdr, rdq, 0x04, 9, 5);
+
+/* tx_hdr_cpu_sig
+ * Signature control for packets going to CPU. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, cpu_sig, 0x04, 0, 9);
+
+/* tx_hdr_sig
+ * Stacking protocol signature. Must be set to 0xE0E0.
+ */
+MLXSW_ITEM32(tx, hdr, sig, 0x0C, 16, 16);
+
+/* tx_hdr_stclass
+ * Stacking TClass.
+ */
+MLXSW_ITEM32(tx, hdr, stclass, 0x0C, 13, 3);
+
+/* tx_hdr_emad
+ * EMAD bit. Must be set for EMADs.
+ */
+MLXSW_ITEM32(tx, hdr, emad, 0x0C, 5, 1);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+
+static void mlxsw_sx_txhdr_construct(struct sk_buff *skb,
+                                    const struct mlxsw_tx_info *tx_info)
+{
+       char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+       bool is_emad = tx_info->is_emad;
+
+       memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+       /* We currently set default values for the egress tclass (QoS). */
+       mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_0);
+       mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+       mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+       mlxsw_tx_hdr_etclass_set(txhdr, is_emad ? MLXSW_TXHDR_ETCLASS_6 :
+                                                 MLXSW_TXHDR_ETCLASS_5);
+       mlxsw_tx_hdr_swid_set(txhdr, 0);
+       mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+       mlxsw_tx_hdr_ctclass3_set(txhdr, MLXSW_TXHDR_CTCLASS3);
+       mlxsw_tx_hdr_rdq_set(txhdr, is_emad ? MLXSW_TXHDR_RDQ_EMAD :
+                                             MLXSW_TXHDR_RDQ_OTHER);
+       mlxsw_tx_hdr_cpu_sig_set(txhdr, MLXSW_TXHDR_CPU_SIG);
+       mlxsw_tx_hdr_sig_set(txhdr, MLXSW_TXHDR_SIG);
+       mlxsw_tx_hdr_stclass_set(txhdr, MLXSW_TXHDR_STCLASS_NONE);
+       mlxsw_tx_hdr_emad_set(txhdr, is_emad ? MLXSW_TXHDR_EMAD :
+                                              MLXSW_TXHDR_NOT_EMAD);
+       mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sx_port_admin_status_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                         bool is_up)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port,
+                           is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+                           MLXSW_PORT_ADMIN_STATUS_DOWN);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sx_port_oper_status_get(struct mlxsw_sx_port *mlxsw_sx_port,
+                                        bool *p_is_up)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char paos_pl[MLXSW_REG_PAOS_LEN];
+       u8 oper_status;
+       int err;
+
+       mlxsw_reg_paos_pack(paos_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(paos), paos_pl);
+       if (err)
+               return err;
+       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP;
+       return 0;
+}
+
+static int mlxsw_sx_port_mtu_set(struct mlxsw_sx_port *mlxsw_sx_port, u16 mtu)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pmtu_pl[MLXSW_REG_PMTU_LEN];
+       int max_mtu;
+       int err;
+
+       mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+       if (err)
+               return err;
+       max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+       if (mtu > max_mtu)
+               return -EINVAL;
+
+       mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sx_port->local_port, mtu);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sx_port_swid_set(struct mlxsw_sx_port *mlxsw_sx_port, u8 swid)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+       mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sx_port->local_port);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sx_port_module_check(struct mlxsw_sx_port *mlxsw_sx_port,
+                                     bool *p_usable)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char pmlp_pl[MLXSW_REG_PMLP_LEN];
+       int err;
+
+       mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sx_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(pmlp), pmlp_pl);
+       if (err)
+               return err;
+       *p_usable = !!mlxsw_reg_pmlp_width_get(pmlp_pl);
+       return 0;
+}
+
+static int mlxsw_sx_port_open(struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+       if (err)
+               return err;
+       netif_start_queue(dev);
+       return 0;
+}
+
+static int mlxsw_sx_port_stop(struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+
+       netif_stop_queue(dev);
+       return mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+}
+
+static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
+                                     struct net_device *dev)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+       const struct mlxsw_tx_info tx_info = {
+               .local_port = mlxsw_sx_port->local_port,
+               .is_emad = false,
+       };
+       struct sk_buff *skb_old = NULL;
+       int err;
+
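+       /* The device expects MLXSW_TXHDR_LEN bytes of headroom for the Tx
+        * header. If the skb lacks it, transmit a reallocated copy and keep
+        * the original around: on -EAGAIN only the copy is freed, and
+        * returning NETDEV_TX_BUSY lets the stack retry with the untouched
+        * original skb.
+        */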
+       if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
+               struct sk_buff *skb_new;
+
+               skb_old = skb;
+               skb_new = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
+               if (!skb_new) {
+                       this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+                       dev_kfree_skb_any(skb_old);
+                       return NETDEV_TX_OK;
+               }
+               skb = skb_new;
+       }
+       mlxsw_sx_txhdr_construct(skb, &tx_info);
+       err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+       if (err == -EAGAIN) {
+               if (skb_old)
+                       dev_kfree_skb_any(skb);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (skb_old)
+               dev_kfree_skb_any(skb_old);
+
+       if (!err) {
+               pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+               u64_stats_update_begin(&pcpu_stats->syncp);
+               pcpu_stats->tx_packets++;
+               pcpu_stats->tx_bytes += skb->len;
+               u64_stats_update_end(&pcpu_stats->syncp);
+       } else {
+               this_cpu_inc(mlxsw_sx_port->pcpu_stats->tx_dropped);
+               dev_kfree_skb_any(skb);
+       }
+       return NETDEV_TX_OK;
+}
+
+static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       int err;
+
+       err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, mtu);
+       if (err)
+               return err;
+       dev->mtu = mtu;
+       return 0;
+}
+
+static struct rtnl_link_stats64 *
+mlxsw_sx_port_get_stats64(struct net_device *dev,
+                         struct rtnl_link_stats64 *stats)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx_port_pcpu_stats *p;
+       u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+       u32 tx_dropped = 0;
+       unsigned int start;
+       int i;
+
+       for_each_possible_cpu(i) {
+               p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i);
+               do {
+                       start = u64_stats_fetch_begin_irq(&p->syncp);
+                       rx_packets      = p->rx_packets;
+                       rx_bytes        = p->rx_bytes;
+                       tx_packets      = p->tx_packets;
+                       tx_bytes        = p->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+
+               stats->rx_packets       += rx_packets;
+               stats->rx_bytes         += rx_bytes;
+               stats->tx_packets       += tx_packets;
+               stats->tx_bytes         += tx_bytes;
+               /* tx_dropped is u32, updated without syncp protection. */
+               tx_dropped      += p->tx_dropped;
+       }
+       stats->tx_dropped       = tx_dropped;
+       return stats;
+}
+
+static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
+       .ndo_open               = mlxsw_sx_port_open,
+       .ndo_stop               = mlxsw_sx_port_stop,
+       .ndo_start_xmit         = mlxsw_sx_port_xmit,
+       .ndo_change_mtu         = mlxsw_sx_port_change_mtu,
+       .ndo_get_stats64        = mlxsw_sx_port_get_stats64,
+};
+
+static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+       strlcpy(drvinfo->driver, mlxsw_sx_driver_name, sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mlxsw_sx_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                mlxsw_sx->bus_info->fw_rev.major,
+                mlxsw_sx->bus_info->fw_rev.minor,
+                mlxsw_sx->bus_info->fw_rev.subminor);
+       strlcpy(drvinfo->bus_info, mlxsw_sx->bus_info->device_name,
+               sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sx_port_hw_stats {
+       char str[ETH_GSTRING_LEN];
+       u64 (*getter)(char *payload);
+};
+
+static const struct mlxsw_sx_port_hw_stats mlxsw_sx_port_hw_stats[] = {
+       {
+               .str = "a_frames_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+       },
+       {
+               .str = "a_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+       },
+       {
+               .str = "a_frame_check_sequence_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+       },
+       {
+               .str = "a_alignment_errors",
+               .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+       },
+       {
+               .str = "a_octets_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+       },
+       {
+               .str = "a_octets_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+       },
+       {
+               .str = "a_in_range_length_errors",
+               .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+       },
+       {
+               .str = "a_out_of_range_length_field",
+               .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+       },
+       {
+               .str = "a_frame_too_long_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+       },
+       {
+               .str = "a_symbol_error_during_carrier",
+               .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+       },
+       {
+               .str = "a_mac_control_frames_transmitted",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+       },
+       {
+               .str = "a_mac_control_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+       },
+       {
+               .str = "a_unsupported_opcodes_received",
+               .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_xmitted",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+       },
+};
+
+#define MLXSW_SX_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sx_port_hw_stats)
+
+static void mlxsw_sx_port_get_strings(struct net_device *dev,
+                                     u32 stringset, u8 *data)
+{
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sx_port_hw_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void mlxsw_sx_port_get_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
+       for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
+               data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
+static int mlxsw_sx_port_get_sset_count(struct net_device *dev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return MLXSW_SX_PORT_HW_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+struct mlxsw_sx_port_link_mode {
+       u32 mask;
+       u32 supported;
+       u32 advertised;
+       u32 speed;
+};
+
+static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+               .supported      = SUPPORTED_100baseT_Full,
+               .advertised     = ADVERTISED_100baseT_Full,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
+               .speed          = 100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+                                 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+               .supported      = SUPPORTED_1000baseKX_Full,
+               .advertised     = ADVERTISED_1000baseKX_Full,
+               .speed          = 1000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+               .supported      = SUPPORTED_10000baseT_Full,
+               .advertised     = ADVERTISED_10000baseT_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+               .supported      = SUPPORTED_10000baseKX4_Full,
+               .advertised     = ADVERTISED_10000baseKX4_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+               .supported      = SUPPORTED_10000baseKR_Full,
+               .advertised     = ADVERTISED_10000baseKR_Full,
+               .speed          = 10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+               .supported      = SUPPORTED_20000baseKR2_Full,
+               .advertised     = ADVERTISED_20000baseKR2_Full,
+               .speed          = 20000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+               .supported      = SUPPORTED_40000baseCR4_Full,
+               .advertised     = ADVERTISED_40000baseCR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+               .supported      = SUPPORTED_40000baseKR4_Full,
+               .advertised     = ADVERTISED_40000baseKR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+               .supported      = SUPPORTED_40000baseSR4_Full,
+               .advertised     = ADVERTISED_40000baseSR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+               .supported      = SUPPORTED_40000baseLR4_Full,
+               .advertised     = ADVERTISED_40000baseLR4_Full,
+               .speed          = 40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .speed          = 25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+               .speed          = 50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
+               .supported      = SUPPORTED_56000baseKR4_Full,
+               .advertised     = ADVERTISED_56000baseKR4_Full,
+               .speed          = 56000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+               .speed          = 100000,
+       },
+};
+
+#define MLXSW_SX_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sx_port_link_mode)
+
+static u32 mlxsw_sx_from_ptys_supported_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return SUPPORTED_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+               return SUPPORTED_Backplane;
+       return 0;
+}
+
+static u32 mlxsw_sx_from_ptys_supported_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+                       modes |= mlxsw_sx_port_link_mode[i].supported;
+       }
+       return modes;
+}
+
+static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
+{
+       u32 modes = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask)
+                       modes |= mlxsw_sx_port_link_mode[i].advertised;
+       }
+       return modes;
+}
+
+static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
+                                           struct ethtool_cmd *cmd)
+{
+       u32 speed = SPEED_UNKNOWN;
+       u8 duplex = DUPLEX_UNKNOWN;
+       int i;
+
+       if (!carrier_ok)
+               goto out;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sx_port_link_mode[i].mask) {
+                       speed = mlxsw_sx_port_link_mode[i].speed;
+                       duplex = DUPLEX_FULL;
+                       break;
+               }
+       }
+out:
+       ethtool_cmd_speed_set(cmd, speed);
+       cmd->duplex = duplex;
+}
+
+static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               return PORT_FIBRE;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
+               return PORT_DA;
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
+               return PORT_NONE;
+
+       return PORT_OTHER;
+}
+
+static int mlxsw_sx_port_get_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       u32 eth_proto_oper;
+       int err;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto\n");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+                             &eth_proto_admin, &eth_proto_oper);
+
+       cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+                        mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+       mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
+                                       eth_proto_oper, cmd);
+
+       eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+       cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
+       cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+       cmd->transceiver = XCVR_INTERNAL;
+       return 0;
+}
+
+static u32 mlxsw_sx_to_ptys_advert_link(u32 advertising)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (advertising & mlxsw_sx_port_link_mode[i].advertised)
+                       ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+static u32 mlxsw_sx_to_ptys_speed(u32 speed)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SX_PORT_LINK_MODE_LEN; i++) {
+               if (speed == mlxsw_sx_port_link_mode[i].speed)
+                       ptys_proto |= mlxsw_sx_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
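+/* ethtool set_settings: with autoneg enabled the advertised mask is written,
+ * otherwise the mask for the single forced speed. The request is clipped to
+ * the port's capability, and an operationally up port is bounced (admin down,
+ * then up) so the new protocol takes effect.
+ */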
+static int mlxsw_sx_port_set_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 speed;
+       u32 eth_proto_new;
+       u32 eth_proto_cap;
+       u32 eth_proto_admin;
+       bool is_up;
+       int err;
+
+       speed = ethtool_cmd_speed(cmd);
+
+       eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+               mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+               mlxsw_sx_to_ptys_speed(speed);
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to get proto\n");
+               return err;
+       }
+       mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+       eth_proto_new &= eth_proto_cap;
+       if (!eth_proto_new) {
+               netdev_err(dev, "Unsupported proto admin requested\n");
+               return -EINVAL;
+       }
+       if (eth_proto_new == eth_proto_admin)
+               return 0;
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, eth_proto_new);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+       if (err) {
+               netdev_err(dev, "Failed to set proto admin\n");
+               return err;
+       }
+
+       err = mlxsw_sx_port_oper_status_get(mlxsw_sx_port, &is_up);
+       if (err) {
+               netdev_err(dev, "Failed to get oper status\n");
+               return err;
+       }
+       if (!is_up)
+               return 0;
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status\n");
+               return err;
+       }
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, true);
+       if (err) {
+               netdev_err(dev, "Failed to set admin status\n");
+               return err;
+       }
+
+       return 0;
+}
+
+static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
+       .get_drvinfo            = mlxsw_sx_port_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_strings            = mlxsw_sx_port_get_strings,
+       .get_ethtool_stats      = mlxsw_sx_port_get_stats,
+       .get_sset_count         = mlxsw_sx_port_get_sset_count,
+       .get_settings           = mlxsw_sx_port_get_settings,
+       .set_settings           = mlxsw_sx_port_set_settings,
+};
+
+static int mlxsw_sx_port_attr_get(struct net_device *dev,
+                                 struct switchdev_attr *attr)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_PORT_PARENT_ID:
+               attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
+               memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static const struct switchdev_ops mlxsw_sx_port_switchdev_ops = {
+       .switchdev_port_attr_get        = mlxsw_sx_port_attr_get,
+};
+
+static int mlxsw_sx_hw_id_get(struct mlxsw_sx *mlxsw_sx)
+{
+       char spad_pl[MLXSW_REG_SPAD_LEN];
+       int err;
+
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(spad), spad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sx->hw_id);
+       return 0;
+}
+
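+/* Derive the port MAC address: query the switch base MAC via the PPAD
+ * register and offset the last byte by the local port number.
+ */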
+static int mlxsw_sx_port_dev_addr_get(struct mlxsw_sx_port *mlxsw_sx_port)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       struct net_device *dev = mlxsw_sx_port->dev;
+       char ppad_pl[MLXSW_REG_PPAD_LEN];
+       int err;
+
+       mlxsw_reg_ppad_pack(ppad_pl, false, 0);
+       err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppad), ppad_pl);
+       if (err)
+               return err;
+       mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
+       /* The last byte value in the base MAC address is guaranteed
+        * to be such that it does not overflow when the local_port
+        * value is added.
+        */
+       dev->dev_addr[ETH_ALEN - 1] += mlxsw_sx_port->local_port;
+       return 0;
+}
+
+static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                      u16 vid, enum mlxsw_reg_spms_state state)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char *spms_pl;
+       int err;
+
+       spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+       if (!spms_pl)
+               return -ENOMEM;
+       mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
+       kfree(spms_pl);
+       return err;
+}
+
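+/* Program the administratively enabled protocol mask, and thereby the port
+ * speed, directly via the PTYS register.
+ */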
+static int mlxsw_sx_port_speed_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                  u32 speed)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+
+       mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sx_port->local_port, speed);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(ptys), ptys_pl);
+}
+
+static int
+mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
+                                   enum mlxsw_reg_spmlr_learn_mode mode)
+{
+       struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
+       char spmlr_pl[MLXSW_REG_SPMLR_LEN];
+
+       mlxsw_reg_spmlr_pack(spmlr_pl, mlxsw_sx_port->local_port, mode);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spmlr), spmlr_pl);
+}
+
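+/* Create and register a netdev for one front-panel port. The sequence below
+ * mirrors the error-unwind labels at the bottom: allocate the netdev and
+ * per-CPU stats, read the MAC, verify the module, then program SWID, speed,
+ * MTU, admin state, STP state and MAC learning before register_netdev().
+ */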
+static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port;
+       struct net_device *dev;
+       bool usable;
+       int err;
+
+       dev = alloc_etherdev(sizeof(struct mlxsw_sx_port));
+       if (!dev)
+               return -ENOMEM;
+       mlxsw_sx_port = netdev_priv(dev);
+       mlxsw_sx_port->dev = dev;
+       mlxsw_sx_port->mlxsw_sx = mlxsw_sx;
+       mlxsw_sx_port->local_port = local_port;
+
+       mlxsw_sx_port->pcpu_stats =
+               netdev_alloc_pcpu_stats(struct mlxsw_sx_port_pcpu_stats);
+       if (!mlxsw_sx_port->pcpu_stats) {
+               err = -ENOMEM;
+               goto err_alloc_stats;
+       }
+
+       dev->netdev_ops = &mlxsw_sx_port_netdev_ops;
+       dev->ethtool_ops = &mlxsw_sx_port_ethtool_ops;
+       dev->switchdev_ops = &mlxsw_sx_port_switchdev_ops;
+
+       err = mlxsw_sx_port_dev_addr_get(mlxsw_sx_port);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Unable to get port mac address\n",
+                       mlxsw_sx_port->local_port);
+               goto err_dev_addr_get;
+       }
+
+       netif_carrier_off(dev);
+
+       dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+                        NETIF_F_VLAN_CHALLENGED;
+
+       /* Each packet needs to have a Tx header (metadata) on top of all
+        * other headers.
+        */
+       dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+       err = mlxsw_sx_port_module_check(mlxsw_sx_port, &usable);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to check module\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_module_check;
+       }
+
+       if (!usable) {
+               dev_dbg(mlxsw_sx->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+                       mlxsw_sx_port->local_port);
+               goto port_not_usable;
+       }
+
+       err = mlxsw_sx_port_swid_set(mlxsw_sx_port, 0);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set SWID\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_swid_set;
+       }
+
+       err = mlxsw_sx_port_speed_set(mlxsw_sx_port,
+                                     MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set speed\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_speed_set;
+       }
+
+       err = mlxsw_sx_port_mtu_set(mlxsw_sx_port, ETH_DATA_LEN);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MTU\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_mtu_set;
+       }
+
+       err = mlxsw_sx_port_admin_status_set(mlxsw_sx_port, false);
+       if (err)
+               goto err_port_admin_status_set;
+
+       err = mlxsw_sx_port_stp_state_set(mlxsw_sx_port,
+                                         MLXSW_PORT_DEFAULT_VID,
+                                         MLXSW_REG_SPMS_STATE_FORWARDING);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set STP state\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_stp_state_set;
+       }
+
+       err = mlxsw_sx_port_mac_learning_mode_set(mlxsw_sx_port,
+                                                 MLXSW_REG_SPMLR_LEARN_MODE_DISABLE);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set MAC learning mode\n",
+                       mlxsw_sx_port->local_port);
+               goto err_port_mac_learning_mode_set;
+       }
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register netdev\n",
+                       mlxsw_sx_port->local_port);
+               goto err_register_netdev;
+       }
+
+       mlxsw_sx->ports[local_port] = mlxsw_sx_port;
+       return 0;
+
+err_register_netdev:
+err_port_admin_status_set:
+err_port_mac_learning_mode_set:
+err_port_stp_state_set:
+err_port_mtu_set:
+err_port_speed_set:
+err_port_swid_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_get:
+       free_percpu(mlxsw_sx_port->pcpu_stats);
+err_alloc_stats:
+       free_netdev(dev);
+       return err;
+}
+
+static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
+{
+       struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+
+       if (!mlxsw_sx_port)
+               return;
+       unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
+       mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
+       free_percpu(mlxsw_sx_port->pcpu_stats);
+}
+
+static void mlxsw_sx_ports_remove(struct mlxsw_sx *mlxsw_sx)
+{
+       int i;
+
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
+               mlxsw_sx_port_remove(mlxsw_sx, i);
+       kfree(mlxsw_sx->ports);
+}
+
+static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx)
+{
+       size_t alloc_size;
+       int i;
+       int err;
+
+       alloc_size = sizeof(struct mlxsw_sx_port *) * MLXSW_PORT_MAX_PORTS;
+       mlxsw_sx->ports = kzalloc(alloc_size, GFP_KERNEL);
+       if (!mlxsw_sx->ports)
+               return -ENOMEM;
+
+       for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
+               err = mlxsw_sx_port_create(mlxsw_sx, i);
+               if (err)
+                       goto err_port_create;
+       }
+       return 0;
+
+err_port_create:
+       for (i--; i >= 1; i--)
+               mlxsw_sx_port_remove(mlxsw_sx, i);
+       kfree(mlxsw_sx->ports);
+       return err;
+}
+
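+/* PUDE (port up/down event) handler: translate the trap payload into a
+ * netif_carrier_on/off transition on the matching port netdev.
+ */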
+static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg,
+                                    char *pude_pl, void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       struct mlxsw_sx_port *mlxsw_sx_port;
+       enum mlxsw_reg_pude_oper_status status;
+       u8 local_port;
+
+       local_port = mlxsw_reg_pude_local_port_get(pude_pl);
+       mlxsw_sx_port = mlxsw_sx->ports[local_port];
+       if (!mlxsw_sx_port) {
+               dev_warn(mlxsw_sx->bus_info->dev, "Port %d: Link event received for non-existent port\n",
+                        local_port);
+               return;
+       }
+
+       status = mlxsw_reg_pude_oper_status_get(pude_pl);
+       if (status == MLXSW_PORT_OPER_STATUS_UP) {
+               netdev_info(mlxsw_sx_port->dev, "link up\n");
+               netif_carrier_on(mlxsw_sx_port->dev);
+       } else {
+               netdev_info(mlxsw_sx_port->dev, "link down\n");
+               netif_carrier_off(mlxsw_sx_port->dev);
+       }
+}
+
+static struct mlxsw_event_listener mlxsw_sx_pude_event = {
+       .func = mlxsw_sx_pude_event_func,
+       .trap_id = MLXSW_TRAP_ID_PUDE,
+};
+
+static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
+                                  enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int err;
+
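+       /* PUDE is the only event trap handled today; the switch keeps the
+        * trap_id to listener mapping explicit for future additions.
+        */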
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sx_pude_event;
+               break;
+       }
+       err = mlxsw_core_event_listener_register(mlxsw_sx->core, el, mlxsw_sx);
+       if (err)
+               return err;
+
+       mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                           MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+       if (err)
+               goto err_event_trap_set;
+
+       return 0;
+
+err_event_trap_set:
+       mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+       return err;
+}
+
+static void mlxsw_sx_event_unregister(struct mlxsw_sx *mlxsw_sx,
+                                     enum mlxsw_event_trap_id trap_id)
+{
+       struct mlxsw_event_listener *el;
+
+       switch (trap_id) {
+       case MLXSW_TRAP_ID_PUDE:
+               el = &mlxsw_sx_pude_event;
+               break;
+       }
+       mlxsw_core_event_listener_unregister(mlxsw_sx->core, el, mlxsw_sx);
+}
+
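+/* Common RX handler for all trapped packets: attribute the skb to the port
+ * netdev, bump the per-CPU counters under the u64_stats seqcount and hand
+ * the packet to the stack.
+ */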
+static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
+                                     void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
+       struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
+
+       if (unlikely(!mlxsw_sx_port)) {
+               if (net_ratelimit())
+                       dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
+                                local_port);
+               return;
+       }
+
+       skb->dev = mlxsw_sx_port->dev;
+
+       pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
+       u64_stats_update_begin(&pcpu_stats->syncp);
+       pcpu_stats->rx_packets++;
+       pcpu_stats->rx_bytes += skb->len;
+       u64_stats_update_end(&pcpu_stats->syncp);
+
+       skb->protocol = eth_type_trans(skb, skb->dev);
+       netif_receive_skb(skb);
+}
+
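+/* Control packet types stolen from the data path and delivered to the CPU;
+ * all entries share the same handler and match on any local port.
+ */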
+static const struct mlxsw_rx_listener mlxsw_sx_rx_listener[] = {
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_FDB_MC,
+       },
+       /* Traps for specific L2 packet types, not trapped as FDB MC */
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_STP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LACP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_EAPOL,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_LLDP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MMRP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_MVRP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_RPVST,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_DHCP,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
+       },
+       {
+               .func = mlxsw_sx_rx_listener_func,
+               .local_port = MLXSW_PORT_DONT_CARE,
+               .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
+       },
+};
+
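+/* Register the RX listeners above and switch each trap from its default
+ * FORWARD action to TRAP_TO_CPU. On failure, already-programmed traps are
+ * restored to FORWARD before their listeners are unregistered.
+ */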
+static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
+{
+       char htgt_pl[MLXSW_REG_HTGT_LEN];
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+       int err;
+
+       mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
+       if (err)
+               return err;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+               err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
+                                                     &mlxsw_sx_rx_listener[i],
+                                                     mlxsw_sx);
+               if (err)
+                       goto err_rx_listener_register;
+
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+               if (err)
+                       goto err_rx_trap_set;
+       }
+       return 0;
+
+err_rx_trap_set:
+       mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                         &mlxsw_sx_rx_listener[i],
+                                         mlxsw_sx);
+err_rx_listener_register:
+       for (i--; i >= 0; i--) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                                 &mlxsw_sx_rx_listener[i],
+                                                 mlxsw_sx);
+       }
+       return err;
+}
+
+static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
+{
+       char hpkt_pl[MLXSW_REG_HPKT_LEN];
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
+               mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+                                   MLXSW_REG_HTGT_TRAP_GROUP_RX,
+                                   mlxsw_sx_rx_listener[i].trap_id);
+               mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
+
+               mlxsw_core_rx_listener_unregister(mlxsw_sx->core,
+                                                 &mlxsw_sx_rx_listener[i],
+                                                 mlxsw_sx);
+       }
+}
+
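+/* Flooding setup: after the SMID workaround below, a single flood table
+ * containing only the CPU port is created and bound to the unknown-unicast,
+ * broadcast and unregistered-multicast packet types, so flooded traffic is
+ * delivered to the CPU rather than between ports.
+ */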
+static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
+{
+       char sfgc_pl[MLXSW_REG_SFGC_LEN];
+       char sgcr_pl[MLXSW_REG_SGCR_LEN];
+       char *smid_pl;
+       char *sftr_pl;
+       int err;
+
+       /* Due to a firmware bug, we must configure SMID. */
+       smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+       if (!smid_pl)
+               return -ENOMEM;
+       mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
+       kfree(smid_pl);
+       if (err)
+               return err;
+
+       /* Configure a flooding table, which includes only the CPU port. */
+       sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+       if (!sftr_pl)
+               return -ENOMEM;
+       mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
+       kfree(sftr_pl);
+       if (err)
+               return err;
+
+       /* Flood different packet types using the flooding table. */
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_BROADCAST,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sfgc_pack(sfgc_pl,
+                           MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
+                           MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID,
+                           MLXSW_REG_SFGC_TABLE_TYPE_SINGLE,
+                           0);
+       err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sfgc), sfgc_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_sgcr_pack(sgcr_pl, true);
+       return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
+}
+
+static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+                        const struct mlxsw_bus_info *mlxsw_bus_info)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+       int err;
+
+       mlxsw_sx->core = mlxsw_core;
+       mlxsw_sx->bus_info = mlxsw_bus_info;
+
+       err = mlxsw_sx_hw_id_get(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to get switch HW ID\n");
+               return err;
+       }
+
+       err = mlxsw_sx_ports_create(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to create ports\n");
+               return err;
+       }
+
+       err = mlxsw_sx_event_register(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to register for PUDE events\n");
+               goto err_event_register;
+       }
+
+       err = mlxsw_sx_traps_init(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to set traps for RX\n");
+               goto err_rx_listener_register;
+       }
+
+       err = mlxsw_sx_flood_init(mlxsw_sx);
+       if (err) {
+               dev_err(mlxsw_sx->bus_info->dev, "Failed to initialize flood tables\n");
+               goto err_flood_init;
+       }
+
+       return 0;
+
+err_flood_init:
+       mlxsw_sx_traps_fini(mlxsw_sx);
+err_rx_listener_register:
+       mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+err_event_register:
+       mlxsw_sx_ports_remove(mlxsw_sx);
+       return err;
+}
+
+static void mlxsw_sx_fini(void *priv)
+{
+       struct mlxsw_sx *mlxsw_sx = priv;
+
+       mlxsw_sx_traps_fini(mlxsw_sx);
+       mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
+       mlxsw_sx_ports_remove(mlxsw_sx);
+}
+
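+/* Device configuration profile handed to the core at init time. Each used_*
+ * flag appears to mark the paired limit as valid for the firmware
+ * configuration command; a zero maximum with the flag set explicitly
+ * disables the resource (e.g. VEPA channels and InfiniBand multicast here).
+ */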
+static struct mlxsw_config_profile mlxsw_sx_config_profile = {
+       .used_max_vepa_channels         = 1,
+       .max_vepa_channels              = 0,
+       .used_max_lag                   = 1,
+       .max_lag                        = 64,
+       .used_max_port_per_lag          = 1,
+       .max_port_per_lag               = 16,
+       .used_max_mid                   = 1,
+       .max_mid                        = 7000,
+       .used_max_pgt                   = 1,
+       .max_pgt                        = 0,
+       .used_max_system_port           = 1,
+       .max_system_port                = 48000,
+       .used_max_vlan_groups           = 1,
+       .max_vlan_groups                = 127,
+       .used_max_regions               = 1,
+       .max_regions                    = 400,
+       .used_flood_tables              = 1,
+       .max_flood_tables               = 2,
+       .max_vid_flood_tables           = 1,
+       .used_flood_mode                = 1,
+       .flood_mode                     = 3,
+       .used_max_ib_mc                 = 1,
+       .max_ib_mc                      = 0,
+       .used_max_pkey                  = 1,
+       .max_pkey                       = 0,
+       .swid_config                    = {
+               {
+                       .used_type      = 1,
+                       .type           = MLXSW_PORT_SWID_TYPE_ETH,
+               }
+       },
+};
+
+static struct mlxsw_driver mlxsw_sx_driver = {
+       .kind                   = MLXSW_DEVICE_KIND_SWITCHX2,
+       .owner                  = THIS_MODULE,
+       .priv_size              = sizeof(struct mlxsw_sx),
+       .init                   = mlxsw_sx_init,
+       .fini                   = mlxsw_sx_fini,
+       .txhdr_construct        = mlxsw_sx_txhdr_construct,
+       .txhdr_len              = MLXSW_TXHDR_LEN,
+       .profile                = &mlxsw_sx_config_profile,
+};
+
+static int __init mlxsw_sx_module_init(void)
+{
+       return mlxsw_core_driver_register(&mlxsw_sx_driver);
+}
+
+static void __exit mlxsw_sx_module_exit(void)
+{
+       mlxsw_core_driver_unregister(&mlxsw_sx_driver);
+}
+
+module_init(mlxsw_sx_module_init);
+module_exit(mlxsw_sx_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox SwitchX-2 driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SWITCHX2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
new file mode 100644 (file)
index 0000000..53a9550
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/trap.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MLXSW_TRAP_H
+#define _MLXSW_TRAP_H
+
+enum {
+       /* Ethernet EMAD and FDB miss */
+       MLXSW_TRAP_ID_FDB_MC = 0x01,
+       MLXSW_TRAP_ID_ETHEMAD = 0x05,
+       /* L2 traps for specific packet types */
+       MLXSW_TRAP_ID_STP = 0x10,
+       MLXSW_TRAP_ID_LACP = 0x11,
+       MLXSW_TRAP_ID_EAPOL = 0x12,
+       MLXSW_TRAP_ID_LLDP = 0x13,
+       MLXSW_TRAP_ID_MMRP = 0x14,
+       MLXSW_TRAP_ID_MVRP = 0x15,
+       MLXSW_TRAP_ID_RPVST = 0x16,
+       MLXSW_TRAP_ID_DHCP = 0x19,
+       MLXSW_TRAP_ID_IGMP_QUERY = 0x30,
+       MLXSW_TRAP_ID_IGMP_V1_REPORT = 0x31,
+       MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
+       MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
+       MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+
+       MLXSW_TRAP_ID_MAX = 0x1FF
+};
+
+enum mlxsw_event_trap_id {
+       /* Port Up/Down event generated by hardware */
+       MLXSW_TRAP_ID_PUDE = 0x8,
+};
+
+#endif /* _MLXSW_TRAP_H */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h
new file mode 100644 (file)
index 0000000..06fc46c
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/txheader.h
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_TXHEADER_H
+#define _MLXSW_TXHEADER_H
+
+#define MLXSW_TXHDR_LEN 0x10
+#define MLXSW_TXHDR_VERSION_0 0
+
+enum {
+       MLXSW_TXHDR_ETH_CTL,
+       MLXSW_TXHDR_ETH_DATA,
+};
+
+#define MLXSW_TXHDR_PROTO_ETH 1
+
+enum {
+       MLXSW_TXHDR_ETCLASS_0,
+       MLXSW_TXHDR_ETCLASS_1,
+       MLXSW_TXHDR_ETCLASS_2,
+       MLXSW_TXHDR_ETCLASS_3,
+       MLXSW_TXHDR_ETCLASS_4,
+       MLXSW_TXHDR_ETCLASS_5,
+       MLXSW_TXHDR_ETCLASS_6,
+       MLXSW_TXHDR_ETCLASS_7,
+};
+
+enum {
+       MLXSW_TXHDR_RDQ_OTHER,
+       MLXSW_TXHDR_RDQ_EMAD = 0x1f,
+};
+
+#define MLXSW_TXHDR_CTCLASS3 0
+#define MLXSW_TXHDR_CPU_SIG 0
+#define MLXSW_TXHDR_SIG 0xE0E0
+#define MLXSW_TXHDR_STCLASS_NONE 0
+
+enum {
+       MLXSW_TXHDR_NOT_EMAD,
+       MLXSW_TXHDR_EMAD,
+};
+
+enum {
+       MLXSW_TXHDR_TYPE_DATA,
+       MLXSW_TXHDR_TYPE_CONTROL = 6,
+};
+
+#endif
index c28111749e1f9ba95c8b49231da95f97278f31e8..2d1b9427407982b43673e96a085c7a9ff69e0a53 100644 (file)
@@ -8226,31 +8226,7 @@ static void s2io_rem_nic(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-/**
- * s2io_starter - Entry point for the driver
- * Description: This function is the entry point for the driver. It verifies
- * the module loadable parameters and initializes PCI configuration space.
- */
-
-static int __init s2io_starter(void)
-{
-       return pci_register_driver(&s2io_driver);
-}
-
-/**
- * s2io_closer - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver. It
- * unregisters the driver.
- */
-
-static __exit void s2io_closer(void)
-{
-       pci_unregister_driver(&s2io_driver);
-       DBG_PRINT(INIT_DBG, "cleanup done\n");
-}
-
-module_init(s2io_starter);
-module_exit(s2io_closer);
+module_pci_driver(s2io_driver);
 
 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
                                struct tcphdr **tcp, struct RxD_t *rxdp,
index d89b6ed82c51ac37d18dda4bfdd9937a0605c7f7..6c5997dc8afc064076e1a00ff726b3d87d3515c9 100644 (file)
@@ -1085,8 +1085,6 @@ static void s2io_txpic_intr_handle(struct s2io_nic *sp);
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void * dev_id);
 
-static int s2io_starter(void);
-static void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
index 2f6cc423ab1dff21cf810c5e6737af58dbae0686..7dbab3c20db5811d333f8f817769b44b79d57bbf 100644 (file)
@@ -2403,7 +2403,6 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
                        qlcnic_free_tx_rings(adapter);
                        return -ENOMEM;
                }
-               memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
                tx_ring->cmd_buf_arr = cmd_buf_arr;
                spin_lock_init(&tx_ring->tx_clean_lock);
        }
index 8aa50ac4e2d619ea62155442c34df76965133173..a157aaaaff6a183b161f5d7469fd8a7602be729d 100644 (file)
@@ -658,6 +658,8 @@ struct ravb_desc {
        __le32 dptr;    /* Descriptor pointer */
 };
 
+#define DPTR_ALIGN     4       /* Required descriptor pointer alignment */
+
 enum DIE_DT {
        /* Frame data */
        DT_FMID         = 0x40,
@@ -739,6 +741,7 @@ enum RAVB_QUEUE {
 #define RX_QUEUE_OFFSET        4
 #define NUM_RX_QUEUE   2
 #define NUM_TX_QUEUE   2
+#define NUM_TX_DESC    2       /* TX descriptors per packet */
 
 struct ravb_tstamp_skb {
        struct list_head list;
@@ -777,9 +780,9 @@ struct ravb_private {
        dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
        struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
        struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
+       void *tx_align[NUM_TX_QUEUE];
        struct sk_buff **rx_skb[NUM_RX_QUEUE];
        struct sk_buff **tx_skb[NUM_TX_QUEUE];
-       void **tx_buffers[NUM_TX_QUEUE];
        u32 rx_over_errors;
        u32 rx_fifo_errors;
        struct net_device_stats stats[NUM_RX_QUEUE];
index 78849dd4ef8e9463c2420c84cc583a927492df58..3d972d8194200a693e91082f687bddc18a14ca7d 100644 (file)
@@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q)
        priv->tx_skb[q] = NULL;
 
        /* Free aligned TX buffers */
-       if (priv->tx_buffers[q]) {
-               for (i = 0; i < priv->num_tx_ring[q]; i++)
-                       kfree(priv->tx_buffers[q][i]);
-       }
-       kfree(priv->tx_buffers[q]);
-       priv->tx_buffers[q] = NULL;
+       kfree(priv->tx_align[q]);
+       priv->tx_align[q] = NULL;
 
        if (priv->rx_ring[q]) {
                ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 
        if (priv->tx_ring[q]) {
                ring_size = sizeof(struct ravb_tx_desc) *
-                           (priv->num_tx_ring[q] + 1);
+                           (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
                dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
                                  priv->tx_desc_dma[q]);
                priv->tx_ring[q] = NULL;
@@ -223,11 +219,12 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 static void ravb_ring_format(struct net_device *ndev, int q)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       struct ravb_ex_rx_desc *rx_desc = NULL;
-       struct ravb_tx_desc *tx_desc = NULL;
-       struct ravb_desc *desc = NULL;
+       struct ravb_ex_rx_desc *rx_desc;
+       struct ravb_tx_desc *tx_desc;
+       struct ravb_desc *desc;
        int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-       int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+       int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+                          NUM_TX_DESC;
        dma_addr_t dma_addr;
        int i;
 
@@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 
        memset(priv->tx_ring[q], 0, tx_ring_size);
        /* Build TX ring buffer */
-       for (i = 0; i < priv->num_tx_ring[q]; i++) {
-               tx_desc = &priv->tx_ring[q][i];
+       for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+            i++, tx_desc++) {
+               tx_desc->die_dt = DT_EEMPTY;
+               tx_desc++;
                tx_desc->die_dt = DT_EEMPTY;
        }
-       tx_desc = &priv->tx_ring[q][i];
        tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
        tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        struct ravb_private *priv = netdev_priv(ndev);
        struct sk_buff *skb;
        int ring_size;
-       void *buffer;
        int i;
 
        /* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        }
 
        /* Allocate rings for the aligned buffers */
-       priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-                                     sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-       if (!priv->tx_buffers[q])
+       priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+                                   DPTR_ALIGN - 1, GFP_KERNEL);
+       if (!priv->tx_align[q])
                goto error;
 
-       for (i = 0; i < priv->num_tx_ring[q]; i++) {
-               buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-               if (!buffer)
-                       goto error;
-               /* Aligned TX buffer */
-               priv->tx_buffers[q][i] = buffer;
-       }
-
        /* Allocate all RX descriptors. */
        ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
        priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        priv->dirty_rx[q] = 0;
 
        /* Allocate all TX descriptors. */
-       ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+       ring_size = sizeof(struct ravb_tx_desc) *
+                   (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
        priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
                                              &priv->tx_desc_dma[q],
                                              GFP_KERNEL);
@@ -439,11 +429,12 @@ static int ravb_tx_free(struct net_device *ndev, int q)
        struct net_device_stats *stats = &priv->stats[q];
        struct ravb_tx_desc *desc;
        int free_num = 0;
-       int entry = 0;
+       int entry;
        u32 size;
 
        for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-               entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+               entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+                                            NUM_TX_DESC);
                desc = &priv->tx_ring[q][entry];
                if (desc->die_dt != DT_FEMPTY)
                        break;
@@ -451,14 +442,18 @@ static int ravb_tx_free(struct net_device *ndev, int q)
                dma_rmb();
                size = le16_to_cpu(desc->ds_tagl) & TX_DS;
                /* Free the original skb. */
-               if (priv->tx_skb[q][entry]) {
+               if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
                        dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
                                         size, DMA_TO_DEVICE);
-                       dev_kfree_skb_any(priv->tx_skb[q][entry]);
-                       priv->tx_skb[q][entry] = NULL;
+                       /* Last packet descriptor? */
+                       if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+                               entry /= NUM_TX_DESC;
+                               dev_kfree_skb_any(priv->tx_skb[q][entry]);
+                               priv->tx_skb[q][entry] = NULL;
+                               stats->tx_packets++;
+                       }
                        free_num++;
                }
-               stats->tx_packets++;
                stats->tx_bytes += size;
                desc->die_dt = DT_EEMPTY;
        }
@@ -512,8 +507,8 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        struct sk_buff *skb;
        dma_addr_t dma_addr;
        struct timespec64 ts;
-       u16 pkt_len = 0;
        u8  desc_status;
+       u16 pkt_len;
        int limit;
 
        boguscnt = min(boguscnt, *quota);
@@ -1277,44 +1272,60 @@ static void ravb_tx_timeout_work(struct work_struct *work)
 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       struct ravb_tstamp_skb *ts_skb = NULL;
        u16 q = skb_get_queue_mapping(skb);
+       struct ravb_tstamp_skb *ts_skb;
        struct ravb_tx_desc *desc;
        unsigned long flags;
        u32 dma_addr;
        void *buffer;
        u32 entry;
+       u32 len;
 
        spin_lock_irqsave(&priv->lock, flags);
-       if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+       if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+           NUM_TX_DESC) {
                netif_err(priv, tx_queued, ndev,
                          "still transmitting with the full ring!\n");
                netif_stop_subqueue(ndev, q);
                spin_unlock_irqrestore(&priv->lock, flags);
                return NETDEV_TX_BUSY;
        }
-       entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-       priv->tx_skb[q][entry] = skb;
+       entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+       priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
        if (skb_put_padto(skb, ETH_ZLEN))
                goto drop;
 
-       buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-       memcpy(buffer, skb->data, skb->len);
-       desc = &priv->tx_ring[q][entry];
-       desc->ds_tagl = cpu_to_le16(skb->len);
-       dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+       buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+                entry / NUM_TX_DESC * DPTR_ALIGN;
+       len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+       memcpy(buffer, skb->data, len);
+       dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
        if (dma_mapping_error(&ndev->dev, dma_addr))
                goto drop;
+
+       desc = &priv->tx_ring[q][entry];
+       desc->ds_tagl = cpu_to_le16(len);
+       desc->dptr = cpu_to_le32(dma_addr);
+
+       buffer = skb->data + len;
+       len = skb->len - len;
+       dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&ndev->dev, dma_addr))
+               goto unmap;
+
+       desc++;
+       desc->ds_tagl = cpu_to_le16(len);
        desc->dptr = cpu_to_le32(dma_addr);
 
        /* TX timestamp required */
        if (q == RAVB_NC) {
                ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
                if (!ts_skb) {
-                       dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+                       desc--;
+                       dma_unmap_single(&ndev->dev, dma_addr, len,
                                         DMA_TO_DEVICE);
-                       goto drop;
+                       goto unmap;
                }
                ts_skb->skb = skb;
                ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,13 +1341,15 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        /* Descriptor type must be set after all the above writes */
        dma_wmb();
-       desc->die_dt = DT_FSINGLE;
+       desc->die_dt = DT_FEND;
+       desc--;
+       desc->die_dt = DT_FSTART;
 
        ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-       priv->cur_tx[q]++;
-       if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-           !ravb_tx_free(ndev, q))
+       priv->cur_tx[q] += NUM_TX_DESC;
+       if (priv->cur_tx[q] - priv->dirty_tx[q] >
+           (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
                netif_stop_subqueue(ndev, q);
 
 exit:
@@ -1344,9 +1357,12 @@ exit:
        spin_unlock_irqrestore(&priv->lock, flags);
        return NETDEV_TX_OK;
 
+unmap:
+       dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+                        le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
        dev_kfree_skb_any(skb);
-       priv->tx_skb[q][entry] = NULL;
+       priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
        goto exit;
 }
 
index 2d8578cade03790782af7e97a1f59f9848301ae6..7b4c3474acfe888665bde2b727d737908e6d789d 100644 (file)
@@ -202,6 +202,7 @@ enum {
        ROCKER_CTRL_IPV4_MCAST,
        ROCKER_CTRL_IPV6_MCAST,
        ROCKER_CTRL_DFLT_BRIDGING,
+       ROCKER_CTRL_DFLT_OVS,
        ROCKER_CTRL_MAX,
 };
 
@@ -321,9 +322,21 @@ static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
        return ntohs(vlan_id);
 }
 
+static bool rocker_port_is_slave(const struct rocker_port *rocker_port,
+                                  const char *kind)
+{
+       return rocker_port->bridge_dev &&
+               !strcmp(rocker_port->bridge_dev->rtnl_link_ops->kind, kind);
+}
+
 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
 {
-       return !!rocker_port->bridge_dev;
+       return rocker_port_is_slave(rocker_port, "bridge");
+}
+
+static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
+{
+       return rocker_port_is_slave(rocker_port, "openvswitch");
 }
 
 #define ROCKER_OP_FLAG_REMOVE          BIT(0)
@@ -1817,6 +1830,30 @@ rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
        return 0;
 }
 
+static int
+rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
+                                     struct rocker_desc_info *desc_info,
+                                     void *priv)
+{
+       int mtu = *(int *)priv;
+       struct rocker_tlv *cmd_info;
+
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
+                              ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
+               return -EMSGSIZE;
+       cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
+       if (!cmd_info)
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
+                              rocker_port->pport))
+               return -EMSGSIZE;
+       if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
+                              mtu))
+               return -EMSGSIZE;
+       rocker_tlv_nest_end(desc_info, cmd_info);
+       return 0;
+}
+
 static int
 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
                                  struct rocker_desc_info *desc_info,
@@ -1874,6 +1911,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
                               macaddr, NULL, NULL);
 }
 
+static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
+                                           int mtu)
+{
+       return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE, 0,
+                              rocker_cmd_set_port_settings_mtu_prep,
+                              &mtu, NULL, NULL);
+}
+
 static int rocker_port_set_learning(struct rocker_port *rocker_port,
                                    enum switchdev_trans trans)
 {
@@ -3243,6 +3288,12 @@ static struct rocker_ctrl {
                .bridge = true,
                .copy_to_cpu = true,
        },
+       [ROCKER_CTRL_DFLT_OVS] = {
+               /* pass all pkts up to CPU */
+               .eth_dst = zero_mac,
+               .eth_dst_mask = zero_mac,
+               .acl = true,
+       },
 };
 
 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
@@ -3755,11 +3806,14 @@ static int rocker_port_stp_update(struct rocker_port *rocker_port,
                break;
        case BR_STATE_LEARNING:
        case BR_STATE_FORWARDING:
-               want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
+               if (!rocker_port_is_ovsed(rocker_port))
+                       want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
                want[ROCKER_CTRL_IPV4_MCAST] = true;
                want[ROCKER_CTRL_IPV6_MCAST] = true;
                if (rocker_port_is_bridged(rocker_port))
                        want[ROCKER_CTRL_DFLT_BRIDGING] = true;
+               else if (rocker_port_is_ovsed(rocker_port))
+                       want[ROCKER_CTRL_DFLT_OVS] = true;
                else
                        want[ROCKER_CTRL_LOCAL_ARP] = true;
                break;
@@ -3983,7 +4037,8 @@ static int rocker_port_open(struct net_device *dev)
 
        napi_enable(&rocker_port->napi_tx);
        napi_enable(&rocker_port->napi_rx);
-       rocker_port_set_enable(rocker_port, true);
+       if (!dev->proto_down)
+               rocker_port_set_enable(rocker_port, true);
        netif_start_queue(dev);
        return 0;
 
@@ -4152,6 +4207,34 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
+static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+       int running = netif_running(dev);
+       int err;
+
+#define ROCKER_PORT_MIN_MTU    68
+#define ROCKER_PORT_MAX_MTU    9000
+
+       if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
+               return -EINVAL;
+
+       if (running)
+               rocker_port_stop(dev);
+
+       netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
+       dev->mtu = new_mtu;
+
+       err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
+       if (err)
+               return err;
+
+       if (running)
+               err = rocker_port_open(dev);
+
+       return err;
+}
+
 static int rocker_port_get_phys_port_name(struct net_device *dev,
                                          char *buf, size_t len)
 {
@@ -4167,11 +4250,23 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
        return err ? -EOPNOTSUPP : 0;
 }
 
+static int rocker_port_change_proto_down(struct net_device *dev,
+                                        bool proto_down)
+{
+       struct rocker_port *rocker_port = netdev_priv(dev);
+
+       if (rocker_port->dev->flags & IFF_UP)
+               rocker_port_set_enable(rocker_port, !proto_down);
+       rocker_port->dev->proto_down = proto_down;
+       return 0;
+}
+
 static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_open                       = rocker_port_open,
        .ndo_stop                       = rocker_port_stop,
        .ndo_start_xmit                 = rocker_port_xmit,
        .ndo_set_mac_address            = rocker_port_set_mac_address,
+       .ndo_change_mtu                 = rocker_port_change_mtu,
        .ndo_bridge_getlink             = switchdev_port_bridge_getlink,
        .ndo_bridge_setlink             = switchdev_port_bridge_setlink,
        .ndo_bridge_dellink             = switchdev_port_bridge_dellink,
@@ -4179,6 +4274,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
        .ndo_fdb_del                    = switchdev_port_fdb_del,
        .ndo_fdb_dump                   = switchdev_port_fdb_dump,
        .ndo_get_phys_port_name         = rocker_port_get_phys_port_name,
+       .ndo_change_proto_down          = rocker_port_change_proto_down,
 };
 
 /********************
@@ -4726,6 +4822,7 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
        struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
        size_t rx_len;
+       u16 rx_flags = 0;
 
        if (!skb)
                return -ENOENT;
@@ -4733,6 +4830,8 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
        if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
                return -EINVAL;
+       if (attrs[ROCKER_TLV_RX_FLAGS])
+               rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
 
        rocker_dma_rx_ring_skb_unmap(rocker, attrs);
 
@@ -4740,6 +4839,9 @@ static int rocker_port_rx_proc(const struct rocker *rocker,
        skb_put(skb, rx_len);
        skb->protocol = eth_type_trans(skb, rocker_port->dev);
 
+       if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
+               skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
+
        rocker_port->dev->stats.rx_packets++;
        rocker_port->dev->stats.rx_bytes += skb->len;
 
@@ -4877,6 +4979,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
        }
        rocker->ports[port_number] = rocker_port;
 
+       switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);
+
        rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
 
        err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
@@ -5156,6 +5260,7 @@ static int rocker_port_bridge_join(struct rocker_port *rocker_port,
                rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
 
        rocker_port->bridge_dev = bridge;
+       switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);
 
        return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
                                    untagged_vid, 0);
@@ -5176,6 +5281,8 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
                rocker_port_internal_vlan_id_get(rocker_port,
                                                 rocker_port->dev->ifindex);
 
+       switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
+                                   false);
        rocker_port->bridge_dev = NULL;
 
        err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
@@ -5190,23 +5297,39 @@ static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
        return err;
 }
 
+static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
+                                  struct net_device *master)
+{
+       int err;
+
+       rocker_port->bridge_dev = master;
+
+       err = rocker_port_fwd_disable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+       if (err)
+               return err;
+       err = rocker_port_fwd_enable(rocker_port, SWITCHDEV_TRANS_NONE, 0);
+
+       return err;
+}
+
 static int rocker_port_master_changed(struct net_device *dev)
 {
        struct rocker_port *rocker_port = netdev_priv(dev);
        struct net_device *master = netdev_master_upper_dev_get(dev);
        int err = 0;
 
-       /* There are currently three cases handled here:
-        * 1. Joining a bridge
-        * 2. Leaving a previously joined bridge
-        * 3. Other, e.g. being added to or removed from a bond or openvswitch,
-        *    in which case nothing is done
-        */
-       if (master && master->rtnl_link_ops &&
-           !strcmp(master->rtnl_link_ops->kind, "bridge"))
-               err = rocker_port_bridge_join(rocker_port, master);
-       else if (rocker_port_is_bridged(rocker_port))
+       /* N.B.: Do nothing if the type of master is not supported */
+       if (master && master->rtnl_link_ops) {
+               if (!strcmp(master->rtnl_link_ops->kind, "bridge"))
+                       err = rocker_port_bridge_join(rocker_port, master);
+               else if (!strcmp(master->rtnl_link_ops->kind, "openvswitch"))
+                       err = rocker_port_ovs_changed(rocker_port, master);
+       } else if (rocker_port_is_bridged(rocker_port)) {
                err = rocker_port_bridge_leave(rocker_port);
+       } else if (rocker_port_is_ovsed(rocker_port)) {
+               err = rocker_port_ovs_changed(rocker_port, NULL);
+       }
 
        return err;
 }
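
rocker_port_master_changed() is driven from a netdevice notifier; the
driver's actual notifier handles more events, but the shape is roughly
this sketch (the example_ name is illustrative):

static int example_netdevice_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEUPPER && rocker_port_dev_check(dev))
		rocker_port_master_changed(dev);

	return NOTIFY_DONE;
}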
index c61fbf968036a3fe4a57f8afbef704bcffa37dc7..12490b2f65040e18a49a4782387736da411c19e5 100644 (file)
@@ -159,6 +159,7 @@ enum {
        ROCKER_TLV_CMD_PORT_SETTINGS_MODE,              /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,          /* u8 */
        ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,         /* binary */
+       ROCKER_TLV_CMD_PORT_SETTINGS_MTU,               /* u16 */
 
        __ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
        ROCKER_TLV_CMD_PORT_SETTINGS_MAX =
@@ -245,6 +246,7 @@ enum {
 #define ROCKER_RX_FLAGS_TCP                    BIT(5)
 #define ROCKER_RX_FLAGS_UDP                    BIT(6)
 #define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD      BIT(7)
+#define ROCKER_RX_FLAGS_FWD_OFFLOAD            BIT(8)
 
 enum {
        ROCKER_TLV_TX_UNSPEC,
index 605cc8948594626783e093db71d474d638a26bc1..06b8061f1b42d7cd76ad51a63ff8969810cfb8c1 100644 (file)
@@ -49,6 +49,12 @@ enum {
  */
 #define HUNT_FILTER_TBL_ROWS 8192
 
+#define EFX_EF10_FILTER_ID_INVALID 0xffff
+struct efx_ef10_dev_addr {
+       u8 addr[ETH_ALEN];
+       u16 id;
+};
+
 struct efx_ef10_filter_table {
 /* The RX match field masks supported by this fw & hw, in order of priority */
        enum efx_filter_match_flags rx_match_flags[
@@ -69,13 +75,14 @@ struct efx_ef10_filter_table {
 /* Shadow of net_device address lists, guarded by mac_lock */
 #define EFX_EF10_FILTER_DEV_UC_MAX     32
 #define EFX_EF10_FILTER_DEV_MC_MAX     256
-       struct {
-               u8 addr[ETH_ALEN];
-               u16 id;
-       } dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX],
-         dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
-       int dev_uc_count;               /* negative for PROMISC */
-       int dev_mc_count;               /* negative for PROMISC/ALLMULTI */
+       struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
+       struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
+       int dev_uc_count;
+       int dev_mc_count;
+/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
+       u16 ucdef_id;
+       u16 bcast_id;
+       u16 mcdef_id;
 };
 
 /* An arbitrary search limit for the software hash table */
@@ -387,7 +394,7 @@ static int efx_ef10_probe(struct efx_nic *efx)
         * First try to enable it, then if we get EPERM, just
         * ask if it's already enabled
         */
-       rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+       rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
        if (rc == 0) {
                nic_data->workaround_35388 = true;
        } else if (rc == -EPERM) {
@@ -984,12 +991,24 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+#ifdef CONFIG_SFC_SRIOV
+       unsigned int i;
+#endif
 
        /* All our allocations have been reset */
        nic_data->must_realloc_vis = true;
        nic_data->must_restore_filters = true;
        nic_data->must_restore_piobufs = true;
        nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+       /* Driver-created vswitches and vports must be re-created */
+       nic_data->must_probe_vswitching = true;
+       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
+#ifdef CONFIG_SFC_SRIOV
+       if (nic_data->vf)
+               for (i = 0; i < efx->vf_count; i++)
+                       nic_data->vf[i].vport_id = 0;
+#endif
 }
 
 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
@@ -1034,6 +1053,12 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
 {
        int rc = efx_mcdi_reset(efx, reset_type);
 
+       /* Unprivileged functions return -EPERM, but need to return success
+        * here so that the datapath is brought back up.
+        */
+       if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
+               rc = 0;
+
        /* If it was a port reset, trigger reallocation of MC resources.
         * Note that on an MC reset nothing needs to be done now because we'll
         * detect the MC reset later and handle it then.
@@ -1558,10 +1583,6 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
        /* All our allocations have been reset */
        efx_ef10_reset_mc_allocations(efx);
 
-       /* Driver-created vswitches and vports must be re-created */
-       nic_data->must_probe_vswitching = true;
-       nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
-
        /* The datapath firmware might have been changed */
        nic_data->must_check_datapath_caps = true;
 
@@ -2197,6 +2218,29 @@ static int efx_ef10_ev_probe(struct efx_channel *channel)
                                    GFP_KERNEL);
 }
 
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+       MCDI_DECLARE_BUF_ERR(outbuf);
+       struct efx_nic *efx = channel->efx;
+       size_t outlen;
+       int rc;
+
+       MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+
+       if (rc && rc != -EALREADY)
+               goto fail;
+
+       return;
+
+fail:
+       efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
+                              outbuf, outlen, rc);
+}
+
 static int efx_ef10_ev_init(struct efx_channel *channel)
 {
        MCDI_DECLARE_BUF(inbuf,
@@ -2208,6 +2252,7 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
        struct efx_ef10_nic_data *nic_data;
        bool supports_rx_merge;
        size_t inlen, outlen;
+       unsigned int enabled, implemented;
        dma_addr_t dma_addr;
        int rc;
        int i;
@@ -2248,30 +2293,52 @@ static int efx_ef10_ev_init(struct efx_channel *channel)
        rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
                          outbuf, sizeof(outbuf), &outlen);
        /* IRQ return is ignored */
-       return rc;
-}
-
-static void efx_ef10_ev_fini(struct efx_channel *channel)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
-       MCDI_DECLARE_BUF_ERR(outbuf);
-       struct efx_nic *efx = channel->efx;
-       size_t outlen;
-       int rc;
-
-       MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
-
-       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
-                         outbuf, sizeof(outbuf), &outlen);
+       if (channel->channel || rc)
+               return rc;
 
-       if (rc && rc != -EALREADY)
+       /* Successfully created event queue on channel 0 */
+       rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
+       if (rc == -ENOSYS) {
+               /* GET_WORKAROUNDS was implemented before the bug26807
+                * workaround, thus the latter must be unavailable in this fw
+                */
+               nic_data->workaround_26807 = false;
+               rc = 0;
+       } else if (rc) {
                goto fail;
+       } else {
+               nic_data->workaround_26807 =
+                       !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
+
+               if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
+                   !nic_data->workaround_26807) {
+                       unsigned int flags;
+
+                       rc = efx_mcdi_set_workaround(efx,
+                                                    MC_CMD_WORKAROUND_BUG26807,
+                                                    true, &flags);
+
+                       if (!rc) {
+                               if (flags &
+                                   1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
+                                       netif_info(efx, drv, efx->net_dev,
+                                                  "other functions on NIC have been reset\n");
+                                       /* MC's boot count has incremented */
+                                       ++nic_data->warm_boot_count;
+                               }
+                               nic_data->workaround_26807 = true;
+                       } else if (rc == -EPERM) {
+                               rc = 0;
+                       }
+               }
+       }
 
-       return;
+       if (!rc)
+               return 0;
 
 fail:
-       efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
-                              outbuf, outlen, rc);
+       efx_ef10_ev_fini(channel);
+       return rc;
 }
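
A readability note on the FLR_DONE test above: '<<' binds tighter than '&',
so the expression already parses as flags & (1 << LBN). A hypothetical
helper that makes the grouping explicit:

static inline bool example_workaround_flag_set(unsigned int flags,
					       unsigned int lbn)
{
	return flags & (1U << lbn);
}

With it, the check would read
example_workaround_flag_set(flags, MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN).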
 
 static void efx_ef10_ev_remove(struct efx_channel *channel)
@@ -3225,6 +3292,19 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
                                               filter_id, false);
 }
 
+static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
+{
+       return filter_id % HUNT_FILTER_TBL_ROWS;
+}
+
+static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+                                        enum efx_filter_priority priority,
+                                        u32 filter_id)
+{
+       return efx_ef10_filter_remove_internal(efx, 1U << priority,
+                                              filter_id, true);
+}
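
The "unsafe" ID is simply the row within the 8192-entry table; the full ID
returned by filter insertion also encodes the match priority in the higher
bits. A sketch of the apparent encoding (example_ helpers are illustrative):

static u32 example_filter_id(u32 match_pri, u32 row)
{
	/* id = priority * table size + row */
	return match_pri * HUNT_FILTER_TBL_ROWS + row;
}

static u32 example_filter_row(u32 id)
{
	/* inverse of the above; what _get_unsafe_id() computes */
	return id % HUNT_FILTER_TBL_ROWS;
}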
+
 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
                                    enum efx_filter_priority priority,
                                    u32 filter_id, struct efx_filter_spec *spec)
@@ -3598,6 +3678,10 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
                goto fail;
        }
 
+       table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+       table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+
        efx->filter_state = table;
        init_waitqueue_head(&table->waitq);
        return 0;
@@ -3700,145 +3784,233 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
        kfree(table);
 }
 
-/* Caller must hold efx->filter_sem for read if race against
- * efx_ef10_filter_table_remove() is possible
- */
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
+               if (id != EFX_EF10_FILTER_ID_INVALID) { \
+                       filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
+                       WARN_ON(!table->entry[filter_idx].spec); \
+                       table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
+               }
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
-       struct net_device *net_dev = efx->net_dev;
-       struct efx_filter_spec spec;
-       bool remove_failed = false;
-       struct netdev_hw_addr *uc;
-       struct netdev_hw_addr *mc;
-       unsigned int filter_idx;
-       int i, n, rc;
-
-       if (!efx_dev_registered(efx))
-               return;
+       unsigned int filter_idx, i;
 
        if (!table)
                return;
 
        /* Mark old filters that may need to be removed */
        spin_lock_bh(&efx->filter_lock);
-       n = table->dev_uc_count < 0 ? 1 : table->dev_uc_count;
-       for (i = 0; i < n; i++) {
-               filter_idx = table->dev_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
-               table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-       }
-       n = table->dev_mc_count < 0 ? 1 : table->dev_mc_count;
-       for (i = 0; i < n; i++) {
-               filter_idx = table->dev_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
-               table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
-       }
+       for (i = 0; i < table->dev_uc_count; i++)
+               EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+       for (i = 0; i < table->dev_mc_count; i++)
+               EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
+       EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
        spin_unlock_bh(&efx->filter_lock);
+}
+#undef EFX_EF10_FILTER_DO_MARK_OLD
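
The helper above relies on its call sites being full statements inside a
braced loop body; a do/while(0) wrapper would be the more defensive form if
it were ever used in an unbraced if/else (a sketch, functionally identical):

#define EFX_EF10_FILTER_DO_MARK_OLD(id)					\
	do {								\
		if ((id) != EFX_EF10_FILTER_ID_INVALID) {		\
			filter_idx =					\
				efx_ef10_filter_get_unsafe_id(efx, (id)); \
			WARN_ON(!table->entry[filter_idx].spec);	\
			table->entry[filter_idx].spec |=		\
				EFX_EF10_FILTER_FLAG_AUTO_OLD;		\
		}							\
	} while (0)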
 
-       /* Copy/convert the address lists; add the primary station
-        * address and broadcast address
-        */
-       netif_addr_lock_bh(net_dev);
-       if (net_dev->flags & IFF_PROMISC ||
-           netdev_uc_count(net_dev) >= EFX_EF10_FILTER_DEV_UC_MAX) {
-               table->dev_uc_count = -1;
-       } else {
-               table->dev_uc_count = 1 + netdev_uc_count(net_dev);
-               ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
-               i = 1;
-               netdev_for_each_uc_addr(uc, net_dev) {
-                       ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
-                       i++;
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct net_device *net_dev = efx->net_dev;
+       struct netdev_hw_addr *uc;
+       int addr_count;
+       unsigned int i;
+
+       table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
+       addr_count = netdev_uc_count(net_dev);
+       if (net_dev->flags & IFF_PROMISC)
+               *promisc = true;
+       table->dev_uc_count = 1 + addr_count;
+       ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
+       i = 1;
+       netdev_for_each_uc_addr(uc, net_dev) {
+               if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
+                       *promisc = true;
+                       break;
                }
+               ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
+               table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+               i++;
        }
-       if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
-           netdev_mc_count(net_dev) >= EFX_EF10_FILTER_DEV_MC_MAX) {
-               table->dev_mc_count = -1;
-       } else {
-               table->dev_mc_count = 1 + netdev_mc_count(net_dev);
-               eth_broadcast_addr(table->dev_mc_list[0].addr);
-               i = 1;
-               netdev_for_each_mc_addr(mc, net_dev) {
-                       ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
-                       i++;
+}
+
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct net_device *net_dev = efx->net_dev;
+       struct netdev_hw_addr *mc;
+       unsigned int i, addr_count;
+
+       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+       table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
+       if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+               *promisc = true;
+
+       addr_count = netdev_mc_count(net_dev);
+       i = 0;
+       netdev_for_each_mc_addr(mc, net_dev) {
+               if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+                       *promisc = true;
+                       break;
                }
+               ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
+               table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
+               i++;
        }
-       netif_addr_unlock_bh(net_dev);
 
-       /* Insert/renew unicast filters */
-       if (table->dev_uc_count >= 0) {
-               for (i = 0; i < table->dev_uc_count; i++) {
-                       efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-                                          EFX_FILTER_FLAG_RX_RSS,
-                                          0);
-                       efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-                                                table->dev_uc_list[i].addr);
-                       rc = efx_ef10_filter_insert(efx, &spec, true);
-                       if (rc < 0) {
-                               /* Fall back to unicast-promisc */
-                               while (i--)
-                                       efx_ef10_filter_remove_safe(
+       table->dev_mc_count = i;
+}
+
+static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
+                                            bool multicast, bool rollback)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_dev_addr *addr_list;
+       struct efx_filter_spec spec;
+       u8 baddr[ETH_ALEN];
+       unsigned int i, j;
+       int addr_count;
+       int rc;
+
+       if (multicast) {
+               addr_list = table->dev_mc_list;
+               addr_count = table->dev_mc_count;
+       } else {
+               addr_list = table->dev_uc_list;
+               addr_count = table->dev_uc_count;
+       }
+
+       /* Insert/renew filters */
+       for (i = 0; i < addr_count; i++) {
+               efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+                                  EFX_FILTER_FLAG_RX_RSS,
+                                  0);
+               efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+                                        addr_list[i].addr);
+               rc = efx_ef10_filter_insert(efx, &spec, true);
+               if (rc < 0) {
+                       if (rollback) {
+                               netif_info(efx, drv, efx->net_dev,
+                                          "efx_ef10_filter_insert failed rc=%d\n",
+                                          rc);
+                               /* Fall back to promiscuous */
+                               for (j = 0; j < i; j++) {
+                                       if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+                                               continue;
+                                       efx_ef10_filter_remove_unsafe(
                                                efx, EFX_FILTER_PRI_AUTO,
-                                               table->dev_uc_list[i].id);
-                               table->dev_uc_count = -1;
-                               break;
+                                               addr_list[j].id);
+                                       addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+                               }
+                               return rc;
+                       } else {
+                               /* mark as not inserted, and carry on */
+                               rc = EFX_EF10_FILTER_ID_INVALID;
                        }
-                       table->dev_uc_list[i].id = rc;
                }
+               addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
        }
-       if (table->dev_uc_count < 0) {
+
+       if (multicast && rollback) {
+               /* Also need an Ethernet broadcast filter */
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
                                   EFX_FILTER_FLAG_RX_RSS,
                                   0);
-               efx_filter_set_uc_def(&spec);
+               eth_broadcast_addr(baddr);
+               efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
                rc = efx_ef10_filter_insert(efx, &spec, true);
                if (rc < 0) {
-                       WARN_ON(1);
-                       table->dev_uc_count = 0;
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Broadcast filter insert failed rc=%d\n", rc);
+                       /* Fall back to promiscuous */
+                       for (j = 0; j < i; j++) {
+                               if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
+                                       continue;
+                               efx_ef10_filter_remove_unsafe(
+                                       efx, EFX_FILTER_PRI_AUTO,
+                                       addr_list[j].id);
+                               addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+                       }
+                       return rc;
                } else {
-                       table->dev_uc_list[0].id = rc;
+                       table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
                }
        }
 
-       /* Insert/renew multicast filters */
-       if (table->dev_mc_count >= 0) {
-               for (i = 0; i < table->dev_mc_count; i++) {
+       return 0;
+}
+
+static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
+                                     bool rollback)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct efx_filter_spec spec;
+       u8 baddr[ETH_ALEN];
+       int rc;
+
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+                          EFX_FILTER_FLAG_RX_RSS,
+                          0);
+
+       if (multicast)
+               efx_filter_set_mc_def(&spec);
+       else
+               efx_filter_set_uc_def(&spec);
+
+       rc = efx_ef10_filter_insert(efx, &spec, true);
+       if (rc < 0) {
+               netif_warn(efx, drv, efx->net_dev,
+                          "%scast mismatch filter insert failed rc=%d\n",
+                          multicast ? "Multi" : "Uni", rc);
+       } else if (multicast) {
+               table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+               if (!nic_data->workaround_26807) {
+                       /* Also need an Ethernet broadcast filter */
                        efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
                                           EFX_FILTER_FLAG_RX_RSS,
                                           0);
+                       eth_broadcast_addr(baddr);
                        efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
-                                                table->dev_mc_list[i].addr);
+                                                baddr);
                        rc = efx_ef10_filter_insert(efx, &spec, true);
                        if (rc < 0) {
-                               /* Fall back to multicast-promisc */
-                               while (i--)
-                                       efx_ef10_filter_remove_safe(
-                                               efx, EFX_FILTER_PRI_AUTO,
-                                               table->dev_mc_list[i].id);
-                               table->dev_mc_count = -1;
-                               break;
+                               netif_warn(efx, drv, efx->net_dev,
+                                          "Broadcast filter insert failed rc=%d\n",
+                                          rc);
+                               if (rollback) {
+                                       /* Roll back the mc_def filter */
+                                       efx_ef10_filter_remove_unsafe(
+                                                       efx, EFX_FILTER_PRI_AUTO,
+                                                       table->mcdef_id);
+                                       table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+                                       return rc;
+                               }
+                       } else {
+                               table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
                        }
-                       table->dev_mc_list[i].id = rc;
-               }
-       }
-       if (table->dev_mc_count < 0) {
-               efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
-                                  EFX_FILTER_FLAG_RX_RSS,
-                                  0);
-               efx_filter_set_mc_def(&spec);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
-               if (rc < 0) {
-                       WARN_ON(1);
-                       table->dev_mc_count = 0;
-               } else {
-                       table->dev_mc_list[0].id = rc;
                }
+               rc = 0;
+       } else {
+               table->ucdef_id = rc;
+               rc = 0;
        }
+       return rc;
+}
+
+/* Remove filters that weren't renewed.  Since nothing else changes the AUTO_OLD
+ * flag or removes these filters, we don't need to hold the filter_lock while
+ * scanning for these filters.
+ */
+static void efx_ef10_filter_remove_old(struct efx_nic *efx)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       bool remove_failed = false;
+       int i;
 
-       /* Remove filters that weren't renewed.  Since nothing else
-        * changes the AUTO_OLD flag or removes these filters, we
-        * don't need to hold the filter_lock while scanning for
-        * these filters.
-        */
        for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
                if (ACCESS_ONCE(table->entry[i].spec) &
                    EFX_EF10_FILTER_FLAG_AUTO_OLD) {
@@ -3917,6 +4089,87 @@ reset_nic:
        return rc ? rc : rc2;
 }
 
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+       struct efx_ef10_filter_table *table = efx->filter_state;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct net_device *net_dev = efx->net_dev;
+       bool uc_promisc = false, mc_promisc = false;
+
+       if (!efx_dev_registered(efx))
+               return;
+
+       if (!table)
+               return;
+
+       efx_ef10_filter_mark_old(efx);
+
+       /* Copy/convert the address lists; add the primary station
+        * address and broadcast address
+        */
+       netif_addr_lock_bh(net_dev);
+       efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
+       efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
+       netif_addr_unlock_bh(net_dev);
+
+       /* Insert/renew unicast filters */
+       if (uc_promisc) {
+               efx_ef10_filter_insert_def(efx, false, false);
+               efx_ef10_filter_insert_addr_list(efx, false, false);
+       } else {
+               /* If any of the filters failed to insert, fall back to
+                * promiscuous mode - add in the uc_def filter.  But keep
+                * our individual unicast filters.
+                */
+               if (efx_ef10_filter_insert_addr_list(efx, false, false))
+                       efx_ef10_filter_insert_def(efx, false, false);
+       }
+
+       /* Insert/renew multicast filters */
+       /* If changing promiscuous state with cascaded multicast filters, remove
+        * old filters first, so that packets are dropped rather than duplicated
+        */
+       if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+               efx_ef10_filter_remove_old(efx);
+       if (mc_promisc) {
+               if (nic_data->workaround_26807) {
+                       /* If we failed to insert promiscuous filters, roll back
+                        * and fall back to individual multicast filters
+                        */
+                       if (efx_ef10_filter_insert_def(efx, true, true)) {
+                               /* Changing promisc state, so remove old filters */
+                               efx_ef10_filter_remove_old(efx);
+                               efx_ef10_filter_insert_addr_list(efx, true, false);
+                       }
+               } else {
+                       /* If we failed to insert promiscuous filters, don't
+                        * roll back.  Regardless, also insert the mc_list
+                        */
+                       efx_ef10_filter_insert_def(efx, true, false);
+                       efx_ef10_filter_insert_addr_list(efx, true, false);
+               }
+       } else {
+               /* If any filters failed to insert, roll back and fall back to
+                * promiscuous mode - mc_def filter and maybe broadcast.  If
+                * that fails, roll back again and insert as many of our
+                * individual multicast filters as we can.
+                */
+               if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+                       /* Changing promisc state, so remove old filters */
+                       if (nic_data->workaround_26807)
+                               efx_ef10_filter_remove_old(efx);
+                       if (efx_ef10_filter_insert_def(efx, true, true))
+                               efx_ef10_filter_insert_addr_list(efx, true, false);
+               }
+       }
+
+       efx_ef10_filter_remove_old(efx);
+       efx->mc_promisc = mc_promisc;
+}
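
Condensed, the policy implemented above is:

/*
 *   unicast,   promisc:  uc_def + uc list (no rollback)
 *   unicast,  !promisc:  uc list; on any failure also add uc_def
 *   multicast, promisc,  wa26807:  mc_def with rollback; on failure
 *                                  drop old filters, insert mc list
 *   multicast, promisc, !wa26807:  mc_def + mc list (no rollback)
 *   multicast, !promisc: mc list with rollback; on failure drop old
 *                        filters (wa26807 only), try mc_def, then
 *                        re-insert the mc list without rollback
 */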
+
 static int efx_ef10_set_mac_address(struct efx_nic *efx)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
@@ -4085,6 +4338,8 @@ efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
        rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);
 
 out:
+       if (rc == -EPERM)
+               rc = 0;
        rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
        return rc ? rc : rc2;
 }
index 81640f8bb811b099f6b2afb55cbb55f004c1096e..98d172b04f71815a1105b304ea5cadfa552687a2 100644 (file)
@@ -1779,15 +1779,31 @@ int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
        return rc;
 }
 
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+                           unsigned int *flags)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+       size_t outlen;
+       int rc;
 
        BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
        MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
-       return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
-                           NULL, 0, NULL);
+       rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+                         outbuf, sizeof(outbuf), &outlen);
+       if (rc)
+               return rc;
+
+       if (!flags)
+               return 0;
+
+       if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+               *flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+       else
+               *flags = 0;
+
+       return 0;
 }
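
The call shape after this signature change, sketched as a hypothetical
caller (mirrors the BUG26807 user in ef10.c; pass NULL when the extended
flags are not wanted):

static int example_enable_bug26807(struct efx_nic *efx)
{
	unsigned int flags;
	int rc;

	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG26807,
				     true, &flags);
	if (rc)
		return rc;
	if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN))
		netif_info(efx, drv, efx->net_dev,
			   "other functions on NIC have been reset\n");
	return 0;
}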
 
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
@@ -1816,7 +1832,11 @@ int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
        return 0;
 
 fail:
-       netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+       /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+        * terrifying.  The call site will have to deal with it though.
+        */
+       netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
+                    efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
        return rc;
 }
 
index 1838afe2da920c59f7ca43744c7a27a0f4e62a6c..025d504c472b5e4ff0bfc516162e69926f65ede6 100644 (file)
@@ -346,7 +346,8 @@ void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
 bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
-int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+                           unsigned int *flags);
 int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
                             unsigned int *enabled_out);
 
index 45fca9fc66b7c9b2512abef1d3363dfbf1ab5344..4cc772164a79be013b7a2a6f66717f6b65c64ee3 100644 (file)
  * Unlike a warm boot, assume DMEM has been reloaded, so that
  * the MC persistent data must be reinitialised. */
 #define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode.  This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
 /* BIST state has been initialized */
 #define MC_FW_BIST_INIT_OK (128)
 
 #define MC_CMD_ERR_EINTR 4
 /* I/O failure */
 #define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
 /* Try again */
 #define MC_CMD_ERR_EAGAIN 11
 /* Out of memory */
 #define MC_CMD_ERR_ENODEV 19
 /* Invalid argument to target */
 #define MC_CMD_ERR_EINVAL 22
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
 /* Out of range */
 #define MC_CMD_ERR_ERANGE 34
 /* Non-recursive resource is already acquired */
 #define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
 /* The datapath is disabled. */
 #define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN  0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that.  Should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps matching it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
 
 #define MC_CMD_ERR_CODE_OFST 0
 
         MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST +            \
         (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
 
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n)  (((n) & 0xff) << 16)
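
Usage sketch (the stack number here is illustrative):

	u32 port_id = EVB_PORT_ID_ASSIGNED | EVB_STACK_ID(3);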
+
 
 /* Version 2 adds an optional argument to error returns: the errno value
  * may be followed by the (0-based) number of the first argument that
 #define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
 /* enum: DDR ECC status update */
 #define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define          MCDI_EVENT_AOE_PTP_STATUS 0xb
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
 #define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define          MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define          MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
 #define       MCDI_EVENT_SRC_LBN 36
 #define       MCDI_EVENT_EV_CODE_WIDTH 4
 #define       MCDI_EVENT_CODE_LBN 44
 #define       MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define          MCDI_EVENT_SW_EVENT 0x0
 /* enum: Bad assert. */
 #define          MCDI_EVENT_CODE_BADSSERT 0x1
 /* enum: PM Notice. */
 #define          MCDI_EVENT_CODE_MC_BIST 0x19
 /* enum: PTP tick event providing current NIC time */
 #define          MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define          MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define          MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 /* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
 #define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
 #define          FCDI_EVENT_CODE_PTP_TICK 0x7
 /* enum: ECC error counters */
 #define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define          FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define          FCDI_EVENT_CODE_PORT_CONFIG 0xa
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
 #define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
 #define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
 #define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
 #define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define       FCDI_EVENT_PTP_STATE_OFST 0
+#define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define       FCDI_EVENT_PTP_STATE_LBN 0
+#define       FCDI_EVENT_PTP_STATE_WIDTH 32
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
 #define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
 #define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define       FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
 
 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid over a normal FCDI event
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
+/* MUM_EVENT structuredef */
+#define    MUM_EVENT_LEN 8
+#define       MUM_EVENT_CONT_LBN 32
+#define       MUM_EVENT_CONT_WIDTH 1
+#define       MUM_EVENT_LEVEL_LBN 33
+#define       MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MUM_EVENT_LEVEL_INFO  0x0
+/* enum: Warning. */
+#define          MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MUM_EVENT_LEVEL_FATAL 0x3
+#define       MUM_EVENT_DATA_OFST 0
+#define        MUM_EVENT_SENSOR_ID_LBN 0
+#define        MUM_EVENT_SENSOR_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_LBN 8
+#define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_LBN 0
+#define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define       MUM_EVENT_DATA_LBN 0
+#define       MUM_EVENT_DATA_WIDTH 32
+#define       MUM_EVENT_SRC_LBN 36
+#define       MUM_EVENT_SRC_WIDTH 8
+#define       MUM_EVENT_EV_CODE_LBN 60
+#define       MUM_EVENT_EV_CODE_WIDTH 4
+#define       MUM_EVENT_CODE_LBN 44
+#define       MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define          MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define          MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LBN 0
+#define       MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define       MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define       MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
 
 /***********************************/
 /* MC_CMD_READ32
 
 /* MC_CMD_COPYCODE_IN msgrequest */
 #define    MC_CMD_COPYCODE_IN_LEN 16
-/* Source address */
-#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
-/* enum: The main image should be entered via a copy of a single word from and
- * to this address when none of the other magic behaviours are required.
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
  */
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
 #define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to start the datapath CPUs.
- * This is useful for certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
  */
 #define          MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
-/* enum: Entering the main image via a copy of a single word from and to this
- * address indicates that it should not attempt to parse any configuration from
- * flash. (In addition, the datapath CPUs will not be started, as for
- * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
- * certain soft rebooting scenarios. (Huntington only)
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
  */
 #define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
 /* Destination address */
 #define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
 #define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
 #define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
 /* Failing thread address */
 #define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
 
 /***********************************/
 /* MC_CMD_LOG_CTRL
- * Configure the output stream for various events and messages.
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
  */
 #define MC_CMD_LOG_CTRL 0x7
 
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
 /* enum: Event queue. */
 #define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
 #define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
 
 /* MC_CMD_LOG_CTRL_OUT msgresponse */
  * input on the same NIC.
  */
 #define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define          MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
 /* enum: Above this for future use. */
-#define          MC_CMD_PTP_OP_MAX 0x1b
+#define          MC_CMD_PTP_OP_MAX 0x1c
 
 /* MC_CMD_PTP_IN_ENABLE msgrequest */
 #define    MC_CMD_PTP_IN_ENABLE_LEN 16
 #define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
-/* Event queue to send PTP time events to */
+/* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
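
Composing the extended QUEUE field, as a sketch (the queue ID value is
illustrative):

	u32 queue_field =
		(8 << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN) |
		(1U << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);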
 
 /* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
 #define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
 /* 1 to enable PPS test mode, 0 to disable and return result. */
 #define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
 
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
 /* MC_CMD_PTP_OUT msgresponse */
 #define    MC_CMD_PTP_OUT_LEN 0
 
 #define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
 
 /* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
-#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 8
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
 /* Time format required/used by this NIC. Applies to all PTP MCDI
  * operations that pass times between the host and firmware. If this operation
  * is not supported (older firmware) a format of seconds and nanoseconds should
  * end and start times minus the time that the MC waited for host end.
  */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
 
 /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
 /*            Enum values, see field(s): */
 /*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
 
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
 
 /***********************************/
 /* MC_CMD_CSR_READ32
 #define          MC_CMD_FW_FULL_FEATURED 0x0
 /* enum: Prefer to use firmware with fewer features but lower latency */
 #define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define          MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define          MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define          MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
 /* enum: Only this option is allowed for non-admin functions */
 #define          MC_CMD_FW_DONT_CARE  0xffffffff
 
 #define          MC_CMD_LOOPBACK_SD_FES_WS  0x22
 /* enum: Near side of AOE Siena side port */
 #define          MC_CMD_LOOPBACK_AOE_INT_NEAR  0x23
+/* enum: Medford Wireside datapath loopback */
+#define          MC_CMD_LOOPBACK_DATA_WS  0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define          MC_CMD_LOOPBACK_FORCE_EXT_LINK  0x25
 /* Supported loopbacks. */
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
 #define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-/* enum: Flow control is off. */
-#define          MC_CMD_FCNTL_OFF 0x0
-/* enum: Respond to flow control. */
-#define          MC_CMD_FCNTL_RESPOND 0x1
-/* enum: Respond to and Issue flow control. */
-#define          MC_CMD_FCNTL_BIDIR 0x2
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
 #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* MC_CMD_SET_MAC_IN msgrequest */
-#define    MC_CMD_SET_MAC_IN_LEN 24
+#define    MC_CMD_SET_MAC_IN_LEN 28
 /* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
  * EtherII, VLAN, bug16011 padding).
  */
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 /* enum: Flow control is off. */
-/*               MC_CMD_FCNTL_OFF 0x0 */
+#define          MC_CMD_FCNTL_OFF 0x0
 /* enum: Respond to flow control. */
-/*               MC_CMD_FCNTL_RESPOND 0x1 */
+#define          MC_CMD_FCNTL_RESPOND 0x1
 /* enum: Respond to and Issue flow control. */
-/*               MC_CMD_FCNTL_BIDIR 0x2 */
+#define          MC_CMD_FCNTL_BIDIR 0x2
 /* enum: Auto neg flow control. */
 #define          MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define          MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define          MC_CMD_FCNTL_GENERATE 0x5
+#define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
 /* MC_CMD_SET_MAC_OUT msgresponse */
 #define    MC_CMD_SET_MAC_OUT_LEN 0
  * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
  * performed, and the statistics may be read from the message response. If
  * DMA_ADDR != 0, then the statistics are dmad to that (page-aligned location).
- * Locks required: None. Returns: 0, ETIME
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
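
The DMA_ADDR convention described above reduces to a simple predicate; a minimal sketch (not the driver's code):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: per the MC_CMD_MAC_STATS comment, DMA_ADDR == 0 means the
 * statistics are returned in the MCDI response; any non-zero value is
 * a page-aligned host address they are DMAd to instead.
 */
static bool mac_stats_via_dma(uint64_t dma_addr)
{
	return dma_addr != 0;
}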
 
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
 #define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
 #define          MC_CMD_MAC_GENERATION_START  0x0 /* enum */
+#define          MC_CMD_MAC_DMABUF_START  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PKTS  0x1 /* enum */
 #define          MC_CMD_MAC_TX_PAUSE_PKTS  0x2 /* enum */
 #define          MC_CMD_MAC_TX_CONTROL_PKTS  0x3 /* enum */
  * PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_STREAMING_PKTS  0x46
-/* enum: RXDP counter: Number of times an emergency descriptor fetch was
- * performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
  */
 #define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS  0x47
 /* enum: RXDP counter: Number of times the DPCPU waited for an existing
 #define          MC_CMD_NVRAM_TYPE_LICENSE 0x12
 /* enum: FC Log. */
 #define          MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define          MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
 
 
 /***********************************/
  */
 #define MC_CMD_SCHEDINFO 0x3e
 
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
 /* MC_CMD_SCHEDINFO_IN msgrequest */
 #define    MC_CMD_SCHEDINFO_IN_LEN 0
 
 #define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC  0x2c
 /* enum: Hotpoint temperature: degC */
 #define          MC_CMD_SENSOR_HOTPOINT_TEMP  0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT0  0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT1  0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define          MC_CMD_SENSOR_MUM_VCC  0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_A  0x31
+/* enum: 0.9v power phase A current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_A  0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_A_TEMP  0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_B  0x34
+/* enum: 0.9v power phase B current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_B  0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_B_TEMP  0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY  0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC  0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY  0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC  0x3a
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE1_NEXT  0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT  0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP  0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC  0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC  0x43
+/* enum: controller internal temperature sensor voltage on slave core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT  0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP  0x45
+/* enum: controller internal temperature sensor voltage on slave core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC  0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC  0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define          MC_CMD_SENSOR_SODIMM_VOUT  0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_0_TEMP  0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_1_TEMP  0x4b
+/* enum: Voltage supplied to QSFP #0 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY0_VCC  0x4c
+/* enum: Voltage supplied to QSFP #1 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY1_VCC  0x4d
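
Sensor numbering is paged: 0x3f is reserved as the page-1 "next page" flag rather than a sensor, and numbering resumes at 0x40. A hypothetical helper, assuming 32-entry pages (inferred from the numbering, not stated explicitly here):

#include <stdint.h>

/* Hypothetical: locate a sensor within the paged sensor masks. */
static unsigned int sensor_page(unsigned int sensor)
{
	return sensor / 32;          /* 0x3f ends page 1; 0x40 starts page 2 */
}

static uint32_t sensor_page_bit(unsigned int sensor)
{
	return 1u << (sensor % 32);  /* bit within that page's 32-bit mask */
}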
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_OFST 4
 #define       MC_CMD_SENSOR_ENTRY_LEN 8
 #define          MC_CMD_SENSOR_STATE_BROKEN  0x3
 /* enum: Sensor is working but does not currently have a reading. */
 #define          MC_CMD_SENSOR_STATE_NO_READING  0x4
+/* enum: Sensor initialisation failed. */
+#define          MC_CMD_SENSOR_STATE_INIT_FAILED  0x5
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
 #define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
 
 /* MC_CMD_WORKAROUND_IN msgrequest */
 #define    MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
 #define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
 /* enum: Bug 17230 work around. */
 #define          MC_CMD_WORKAROUND_BUG17230 0x1
 #define          MC_CMD_WORKAROUND_BUG35388 0x2
 /* enum: Bug35017 workaround (A64 tables must be identity map) */
 #define          MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining).
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's
+ * privilege level. If the client has the admin privilege, then all functions
+ * that have filters installed will be FLRed and the FLR_DONE flag will be
+ * set. Otherwise the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define          MC_CMD_WORKAROUND_BUG26807 0x6
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
 #define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
 
 /* MC_CMD_WORKAROUND_OUT msgresponse */
 #define    MC_CMD_WORKAROUND_OUT_LEN 0
 
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
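Decoding the extended response is a single bit test; a sketch, assuming the caller has already extracted the FLAGS dword from the reply:

#include <stdbool.h>
#include <stdint.h>

/* Sketch: did enabling/disabling BUG26807 FLR other functions? */
static bool workaround_caused_flr(uint32_t flags)
{
	return (flags >> 0) & 0x1;   /* FLR_DONE_LBN 0, FLR_DONE_WIDTH 1 */
}
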
 
 /***********************************/
 /* MC_CMD_GET_PHY_MEDIA_INFO
 
 /***********************************/
 /* MC_CMD_GET_MAC_ADDRESSES
- * Returns the base MAC, count and stride for the requestiong function
+ * Returns the base MAC, count and stride for the requesting function
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
 
 /* Spacing of allocated MAC addresses */
 #define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
 
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define    MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define       MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define          MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define          MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define          MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define    MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define    MC_CMD_CLP_IN_DEFAULT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_LEN 12
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
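A sketch of marshalling the 12-byte SET_MAC request from the offsets above; the little-endian dword encoding is an assumption about the MCDI wire format, not something this header states:

#include <stdint.h>
#include <string.h>

/* Sketch: build MC_CMD_CLP_IN_SET_MAC (LEN 12). */
static void clp_set_mac_request(uint8_t req[12], const uint8_t mac[6])
{
	memset(req, 0, 12);
	req[0] = 0x2;            /* MC_CMD_CLP_OP_SET_MAC, LE dword at OP_OFST 0 */
	memcpy(req + 4, mac, 6); /* ADDR_OFST 4, ADDR_LEN 6 */
	/* req[10..11] remain zero: RESERVED_OFST 10, RESERVED_LEN 2 */
}
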
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define    MC_CMD_MUM_IN_LEN 4
+#define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define        MC_CMD_MUM_IN_OP_LBN 0
+#define        MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define          MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define          MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define          MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define          MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define          MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define          MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define          MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define          MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define          MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define          MC_CMD_MUM_OP_QSFP 0xc
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define    MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define       MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device connected to the MUM whose registers are to be read */
+#define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
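A sketch of composing the 16-byte READ request, again assuming little-endian field encoding:

#include <stdint.h>

/* Sketch: read 'nwords' registers starting at 'addr' from the Hittite
 * clock generator via the MUM.
 */
static void mum_read_request(uint8_t req[16], uint32_t addr, uint32_t nwords)
{
	const uint32_t words[4] = {
		0x4,    /* MC_CMD_MUM_OP_READ in the cmd header (OFST 0) */
		0x1,    /* MC_CMD_MUM_DEV_HITTITE (DEVICE_OFST 4)        */
		addr,   /* ADDR_OFST 8                                   */
		nwords, /* NUMWORDS_OFST 12                              */
	};

	for (int i = 0; i < 4; i++) {
		req[4 * i + 0] = words[i] & 0xff;
		req[4 * i + 1] = (words[i] >> 8) & 0xff;
		req[4 * i + 2] = (words[i] >> 16) & 0xff;
		req[4 * i + 3] = (words[i] >> 24) & 0xff;
	}
}
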
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device connected to the MUM whose registers are to be written */
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/*               MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define    MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define          MC_CMD_MUM_IN_LOG_OP_UART  0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define    MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define    MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_READ_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
 /* MC_CMD_RESOURCE_SPECIFIER enum */
 /* enum: Any */
 #define          MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
 #define          NVRAM_PARTITION_TYPE_PHY_MIN              0xa00
 /* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
 #define          NVRAM_PARTITION_TYPE_PHY_MAX              0xaff
+/* enum: Primary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA                 0xb00
+/* enum: Secondary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA_BACKUP          0xb01
+/* enum: FC firmware partition */
+#define          NVRAM_PARTITION_TYPE_FC_FIRMWARE          0xb02
+/* enum: FC License partition */
+#define          NVRAM_PARTITION_TYPE_FC_LICENSE           0xb03
+/* enum: Non-volatile log output partition for FC */
+#define          NVRAM_PARTITION_TYPE_FC_LOG               0xb04
+/* enum: MUM firmware partition */
+#define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE         0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_LOG              0xc01
+/* enum: MUM Application table partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_APPTABLE         0xc02
+/* enum: MUM boot rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_BOOT_ROM         0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_PROD_ROM         0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_USER_ROM         0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK         0xc06
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN  0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
 #define    LICENSED_APP_ID_LEN 4
 #define       LICENSED_APP_ID_ID_OFST 0
 /* enum: OpenOnload */
-#define          LICENSED_APP_ID_ONLOAD            0x1
+#define          LICENSED_APP_ID_ONLOAD                  0x1
 /* enum: PTP timestamping */
-#define          LICENSED_APP_ID_PTP               0x2
+#define          LICENSED_APP_ID_PTP                     0x2
 /* enum: SolarCapture Pro */
-#define          LICENSED_APP_ID_SOLARCAPTURE_PRO  0x4
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO        0x4
+/* enum: SolarSecure filter engine */
+#define          LICENSED_APP_ID_SOLARSECURE             0x8
+/* enum: Performance monitor */
+#define          LICENSED_APP_ID_PERF_MONITOR            0x10
+/* enum: SolarCapture Live */
+#define          LICENSED_APP_ID_SOLARCAPTURE_LIVE       0x20
+/* enum: Capture SolarSystem */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM     0x40
+/* enum: Network Access Control */
+#define          LICENSED_APP_ID_NETWORK_ACCESS_CONTROL  0x80
 #define       LICENSED_APP_ID_ID_LBN 0
 #define       LICENSED_APP_ID_ID_WIDTH 32
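
The application IDs above are distinct powers of two, so a set of licensed applications can be carried and tested as a bitmask (an inference from the encoding rather than an explicit statement here):

#include <stdbool.h>
#include <stdint.h>

/* Sketch: test one LICENSED_APP_ID_* bit against a licence mask,
 * e.g. app_is_licensed(mask, 0x20) for SolarCapture Live.
 */
static bool app_is_licensed(uint32_t licensed_mask, uint32_t app_id)
{
	return (licensed_mask & app_id) != 0;
}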
 
-
-/***********************************/
-/* MC_CMD_GET_WORKAROUNDS
- * Read the list of all implemented and all currently enabled workarounds. The
- * enums here must correspond with those in MC_CMD_WORKAROUND.
- */
-#define MC_CMD_GET_WORKAROUNDS 0x59
-
-/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
-#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
-/* Each workaround is represented by a single bit according to the enums below.
- */
-#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
-#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
-/* enum: Bug 17230 work around. */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
-/* enum: Bug 35388 work around (unsafe EVQ writes). */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
-/* enum: Bug35017 workaround (A64 tables must be identity map) */
-#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
-
-
-/***********************************/
-/* MC_CMD_LINK_STATE_MODE
- * Read/set link state mode of a VF
- */
-#define MC_CMD_LINK_STATE_MODE 0x5c
-
-#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
-#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
-/* The target function to have its link state mode read or set, must be a VF
- * e.g. VF 1,3 = 0x00030001
- */
-#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
-#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
-/* New link state mode to be set */
-#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
-#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
-/* enum: Use this value to just read the existing setting without modifying it.
- */
-#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
-
-/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
-#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
-#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+/* TX_TIMESTAMP_EVENT structuredef */
+#define    TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: ordinary TX completion, or the low or high part of a TX
+ * timestamp
+ */
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION  0x0
+/* enum: This is the low part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO  0x51
+/* enum: This is the high part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI  0x52
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
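Reassembling the full 32-bit timestamp word from the LO/HI event pair is a shift-and-or; a minimal sketch:

#include <stdint.h>

/* Sketch: combine the TSTAMP_DATA fields of a TX_EV_TSTAMP_LO (0x51)
 * event and the following TX_EV_TSTAMP_HI (0x52) event.
 */
static uint32_t tx_timestamp_combine(uint16_t data_lo, uint16_t data_hi)
{
	return ((uint32_t)data_hi << 16) | data_lo;
}
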
+/* RSS_MODE structuredef */
+#define    RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15, which
+ * can be considered as 4 bits selecting which fields are included in the
+ * hash. (A value of 0 effectively disables RSS spreading for the packet
+ * type.) The YAML generation tools require this structure to be a whole
+ * number of bytes wide, but only 4 bits are relevant.
+ */
+#define       RSS_MODE_HASH_SELECTOR_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_LBN 1
+#define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_LBN 2
+#define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_LBN 3
+#define        RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define       RSS_MODE_HASH_SELECTOR_LBN 0
+#define       RSS_MODE_HASH_SELECTOR_WIDTH 8
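
Since each field is a single bit at a known LBN, a hash selector is just an OR of shifted bits; an illustrative helper:

#include <stdint.h>

/* Sketch: the classic 4-tuple hash selector (0xf) built from the
 * RSS_MODE_HASH_*_LBN positions above.
 */
static uint8_t rss_mode_4tuple(void)
{
	return (1u << 0)    /* SRC_ADDR */
	     | (1u << 1)    /* DST_ADDR */
	     | (1u << 2)    /* SRC_PORT */
	     | (1u << 3);   /* DST_PORT */
}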
 
 
 /***********************************/
 
 #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_RXQ_IN msgrequest */
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET  0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM  0x1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M  0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K  0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K  0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K  0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K  0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned, 4k host memory buffer */
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
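The FLAGS word follows the usual LBN/WIDTH packing; a sketch selecting packed-stream mode with 256K buffers and per-packet timestamps (positions taken from the defines above):

#include <stdint.h>

/* Sketch: MC_CMD_INIT_RXQ_EXT_IN FLAGS for SolarCapture-style packed
 * stream.
 */
static uint32_t rxq_ext_flags(void)
{
	uint32_t flags = 0;

	flags |= 1u   << 2;   /* FLAG_TIMESTAMP_LBN */
	flags |= 0x1u << 10;  /* DMA_MODE_LBN: PACKED_STREAM */
	flags |= 0x2u << 15;  /* PACKED_STREAM_BUFF_SIZE_LBN: PS_BUFF_256K */
	return flags;
}
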
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_INIT_TXQ
 
 #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_INIT_TXQ_IN msgrequest */
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
 /* The port ID associated with the v-adaptor which should contain this DMAQ. */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
 
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of a 4k-aligned, 4k host memory buffer */
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
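The QBB flags word at offset 540 packs an enable bit and a 3-bit priority; a sketch:

#include <stdint.h>

/* Sketch: MC_CMD_INIT_TXQ_EXT_IN QBB_FLAGS enabling Qbb flow control
 * at a given priority (0-7).
 */
static uint32_t txq_qbb_flags(unsigned int priority)
{
	return (1u << 0)                  /* QBB_ENABLE_LBN 0            */
	     | ((priority & 0x7u) << 1);  /* QBB_PRIORITY_LBN 1, WIDTH 3 */
}
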
 /* MC_CMD_INIT_TXQ_OUT msgresponse */
 #define    MC_CMD_INIT_TXQ_OUT_LEN 0
 
 /* MC_CMD_PROXY_CMD_OUT msgresponse */
 #define    MC_CMD_PROXY_CMD_OUT_LEN 0
 
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define    MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID  0x0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define       MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define       MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define       MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define       MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define       MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define       MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define       MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define       MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define       MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define       MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define       MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
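
The geometry rules above (power-of-2 block sizes, an optional reply buffer, one NUM_BLOCKS shared by all three buffers) can be validated up front; a sketch:

#include <stdint.h>

static int is_pow2(uint32_t x)
{
	return x && !(x & (x - 1));
}

/* Sketch: check MC_CMD_PROXY_CONFIGURE_IN buffer geometry. A zero
 * REPLY_BLOCK_SIZE means the reply buffer is not provided.
 */
static int proxy_buffers_ok(uint32_t num_blocks, uint32_t status_sz,
			    uint32_t request_sz, uint32_t reply_sz)
{
	return num_blocks > 0 && is_pow2(status_sz) && is_pow2(request_sz) &&
	       (reply_sz == 0 || is_pow2(reply_sz));
}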
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define          MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_ALLOC_BUFTBL_CHUNK
 /* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
 #define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
 
+/* PORT_CONFIG_ENTRY structuredef */
+#define    PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define       PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define       PORT_CONFIG_ENTRY_CORE_OFST 1
+#define       PORT_CONFIG_ENTRY_CORE_LEN 1
+#define          PORT_CONFIG_ENTRY_STANDALONE  0x0 /* enum */
+#define          PORT_CONFIG_ENTRY_MASTER  0x1 /* enum */
+#define          PORT_CONFIG_ENTRY_SLAVE  0x2 /* enum */
+#define       PORT_CONFIG_ENTRY_CORE_LBN 8
+#define       PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define       PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define       PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define       PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define       PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define       PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define       PORT_CONFIG_ENTRY_LANES_OFST 4
+#define       PORT_CONFIG_ENTRY_LANES_LBN 32
+#define       PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define       PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define       PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define       PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define       PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
 
 /***********************************/
 /* MC_CMD_FILTER_OP
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_HOST  0x1
 /* enum: receive to MC */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_MC  0x2
-/* enum: loop back to port 0 TX MAC */
+/* enum: loop back to TXDP 0 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX0  0x3
-/* enum: loop back to port 1 TX MAC */
+/* enum: loop back to TXDP 1 */
 #define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1  0x4
 /* receive queue handle (for multiple queue modes, this is the base queue) */
 #define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
 #define          MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
 /* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
  * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
- * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
- * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
- * a valid handle.
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
  */
 #define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
 /* transmit domain (reserved; set to 0) */
 #define       MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
 #define       MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
 
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP  0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST  0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC  0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0  0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1  0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS  0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING  0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH  0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT  0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN  0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE  0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL  0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE  0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
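
A minimal sketch of how the MATCH_FIELDS bitmask and the VNI_OR_VSID dword
defined above combine for an outer-frame VXLAN filter, assuming only the
LBN/WIDTH/enum constants from this header (the helper name is hypothetical):

    #include <stdint.h>

    /* Select on IP protocol + destination port + VXLAN VNI. */
    static uint32_t build_vxlan_match(uint32_t *vni_or_vsid, uint32_t vni)
    {
            uint32_t match_fields =
                    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN) |
                    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN) |
                    (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN);

            /* 24-bit VNI in the low bits, type code in the top byte
             * (VNI_TYPE_VXLAN is 0x0, so the OR is a no-op kept for clarity).
             */
            *vni_or_vsid =
                    ((vni & ((1u << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH) - 1))
                     << MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN) |
                    ((uint32_t)MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN
                     << MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN);

            return match_fields;
    }
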
 /* MC_CMD_FILTER_OP_OUT msgresponse */
 #define    MC_CMD_FILTER_OP_OUT_LEN 12
 /* identifies the type of operation requested */
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
 #define       MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID  0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID  0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_OUT/HANDLE */
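
Since the 64-bit handle is opaque, the only portable client-side check is
against the guaranteed-invalid value; a minimal sketch, assuming the handle
has already been read back as two 32-bit halves:

    #include <stdbool.h>
    #include <stdint.h>

    static bool filter_handle_valid(uint32_t handle_lo, uint32_t handle_hi)
    {
            return !(handle_lo == MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID &&
                     handle_hi == MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID);
    }
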
 
 
 /***********************************/
 #define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
 /* enum: read the list of supported RX filter matches */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES  0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS  0x2
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
 
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
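
A sketch of how a caller might test the one restriction flag defined so far,
assuming RESTRICTION_FLAGS has been read from offset 4 of the response:

    #include <stdbool.h>
    #include <stdint.h>

    static bool dst_ip_filters_must_be_mcast(uint32_t restriction_flags)
    {
            return (restriction_flags >>
                    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN) & 1;
    }
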
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
 #define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU  0x0
 /* enum: TX dispatcher CPU */
 #define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU  0x1
-/* enum: Lookup engine */
+/* enum: Lookup engine (with original metadata format) */
 #define          MC_CMD_PARSER_DISP_RW_IN_LUE  0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA  0x3
 /* identifies the type of operation requested */
 #define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
 /* enum: read a word of DICPU DMEM or a LUE entry */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
 /* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
 #define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
 /* value to write (for LUE writes) */
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
 #define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
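
The read-modify-write rule quoted above ("new = (old & mask) ^ value") keeps a
bit unchanged when its mask bit is 1 and its value bit is 0, and forces a bit
to its value bit when its mask bit is 0; a host-side sketch of the same rule:

    #include <stdint.h>

    static uint32_t dmem_rmw(uint32_t old, uint32_t and_mask, uint32_t xor_value)
    {
            return (old & and_mask) ^ xor_value;
    }
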
 /* The maximum number of VIs that would be useful */
 #define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
 
-/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
 #define    MC_CMD_ALLOC_VIS_OUT_LEN 8
 /* The number of VIs allocated on this function */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
  */
 #define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
 
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
 
 /***********************************/
 /* MC_CMD_FREE_VIS
 #define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
 
 /* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
-#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
 /* The number of VIs allocated on this function */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
 /* The base absolute VI number allocated to this function. Required to
  * correctly interpret wakeup events.
  */
 #define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
 
 
 /***********************************/
 #define MC_CMD_GET_CAPABILITIES 0xbe
 
 #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
 /* MC_CMD_GET_CAPABILITIES_IN msgrequest */
 #define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
 
 #define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
 #define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP  0x0
 /* enum: Low latency RXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY  0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM  0x2
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST  0x10a
 /* enum: RXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH  0x101
 /* enum: RXDP Test firmware image 2 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP  0x0
 /* enum: Low latency TXDP firmware */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY  0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE  0x3
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST  0x12d
 /* enum: TXDP Test firmware image 1 */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT  0x101
 /* enum: TXDP Test firmware image 2 */
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY  0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM  0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY  0xf
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3 /* enum */
-#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4 /* enum */
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED  0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT  0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT  0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH  0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM  0x4
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY  0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF  0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE  0xe
 /* Hardware capabilities of NIC */
 #define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
 /* Licensed capabilities */
 /* the rate in mbps */
 #define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
 
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
 /* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
 
 #define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
 /* the static priority associated with the txq */
 #define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
-/* bitmask of the priority queues this txq is inserted into */
+/* bitmask of the priority queues this txq joins when it is inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 #define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
 /* an already reserved bucket (typically set to bucket associated with outer
 /* the min bucket (typically for ETS/minimum bandwidth) */
 #define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
 
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq joins when it is inserted. */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
 /* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
 #define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
 
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN  0x1
 /* enum: VEB */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB  0x2
-/* enum: VEPA */
+/* enum: VEPA (obsolete) */
 #define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA  0x3
+/* enum: MUX */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX  0x4
+/* enum: Snapper specific; semantics TBD */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST  0x5
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to support. */
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly
+ * this number of tags. For other v-switch types, this must be zero or
+ * greater, and is an upper limit on the number of VLAN tags for attached
+ * v-ports. An error will be returned if the existing configuration means we
+ * can't support attached v-ports with this number of tags.
+ */
 #define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
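
A client-side sketch of the constraint described above, assuming TYPE_VLAN is
the "VLAN aggregator" case the comment refers to (the firmware enforces the
rule; this helper only mirrors the documented behaviour):

    #include <stdbool.h>
    #include <stdint.h>

    static bool vport_tags_compatible(uint32_t vswitch_type,
                                      uint32_t vswitch_num_tags,
                                      uint32_t vport_num_tags)
    {
            if (vswitch_type == MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN)
                    return vswitch_num_tags >= 1 &&
                           vport_num_tags == vswitch_num_tags;
            return vport_num_tags <= vswitch_num_tags;
    }
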
 
 /* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
-/* The number of VLAN tags to insert/remove. */
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
 #define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
 
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
-/* The handle of the new RSS context */
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID  0xffffffff
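
Because 0xFFFFFFFF can never be a valid handle, it doubles as a convenient
"no RSS context allocated" sentinel on the host; a minimal sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static bool rss_context_valid(uint32_t rss_context_id)
    {
            return rss_context_id !=
                   MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID;
    }
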
 
 
 /***********************************/
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags */
+/* Hash control flags. The _EN bits are always supported. The _MODE bits only
+ * work when the firmware reports ADDITIONAL_RSS_MODES in
+ * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
+ * See the RSS_MODE structure for the meaning of the mode bits.
+ */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
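
A sketch of building the FLAGS dword under the rule above: the legacy _EN bits
are always honoured, and a non-zero 4-bit _MODE field (only meaningful when
the firmware reports ADDITIONAL_RSS_MODES) overrides them. The mode encoding
itself comes from the RSS_MODE structure, which is not shown here:

    #include <stdint.h>

    static uint32_t rss_set_flags(uint32_t tcp_ipv4_mode)
    {
            uint32_t flags =
                    1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;

            flags |= (tcp_ipv4_mode & 0xf)
                     << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
            return flags;
    }
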
 
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags */
+/* Hash control flags. If any _MODE bits are non-zero (which will only be true
+ * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
+ * disregarded (but are guaranteed to be consistent with the _MODE bits if
+ * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
+ * allocated).
+ */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
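
Decoding follows the same caveat in reverse: extract the 4-bit _MODE field
and, if it is non-zero, disregard the legacy _EN bits. A minimal sketch:

    #include <stdint.h>

    static uint32_t rss_get_tcp_ipv4_mode(uint32_t flags)
    {
            return (flags >>
                    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN) & 0xf;
    }
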
 
 
 /***********************************/
 
 /* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
 #define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
-/* The handle of the new .1p mapping */
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
 #define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID  0xffffffff
 
 
 /***********************************/
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_CLASS_STATS
- * Retrieve rmon rx class statistics
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
  */
-#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
-
-/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
 
-/***********************************/
-/* MC_CMD_RMON_TX_CLASS_STATS
- * Retrieve rmon tx class statistics
- */
-#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
-/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
 
-/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
 
 
 /***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
- * Retrieve rmon rx super_class statistics
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
  */
-#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
 
-
-/***********************************/
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
- * Retrieve rmon tx super_class statistics
- */
-#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
- * Add qid to class for statistics collection
- */
-#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
-/* class */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
-/* qid */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
-/* flags */
-#define       MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
-#define        MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
-
-/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_CLASS
- * Allocate an rmon class
- */
-#define MC_CMD_RMON_ALLOC_CLASS 0xca
-
-/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_CLASS
- * Deallocate an rmon class
- */
-#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
-
-/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
-/* class */
-#define       MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS
- * Allocate an rmon super_class
- */
-#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
-
-/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
- * Deallocate an rmon tx super_class
- */
-#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
-
-/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
-#define    MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_UP_CONV_STATS
- * Retrieve up converter statistics
- */
-#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPI_STATS
- * Retrieve rx ipi stats
- */
-#define MC_CMD_RMON_RX_IPI_STATS 0xcf
-
-/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
- * Retrieve rx ipsec cntxt_ptr indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
- * Retrieve rx ipsec port indexed stats
- */
-#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS
- * Add a MAC address to a v-port
- */
-#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
-
-#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
-
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
-#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
-/* The handle of the v-port */
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
-/* MAC address to add */
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
-#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
-
-/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
-#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
-
-
-/***********************************/
-/* MC_CMD_VPORT_DEL_MAC_ADDRESS
- * Delete a MAC address from a v-port
- */
-#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
-
-#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
 #define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
-/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
 
 
-/***********************************/
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
- * Retrieve rx class drop stats
- */
-#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
- * Retrieve rx super class drop stats
- */
-#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
-#define        MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_RX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPI_STATS
- * Retrieve tx ipi stats
- */
-#define MC_CMD_RMON_TX_IPI_STATS 0xd7
-
-/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
-#define        MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
- * Retrieve tx ipsec counters by cntxt_ptr
- */
-#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
-#define        MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
- * Retrieve tx ipsec counters by port
- */
-#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
- * Retrieve tx ipsec overflow
- */
-#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
-#define        MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_STATS
- * Retrieve tx nowhere stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
- * Retrieve tx nowhere qbb stats
- */
-#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
-#define        MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_ERRORS_STATS
- * Retrieve rxdp errors
- */
-#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
-#define        MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_TX_OVERFLOW_STATS
- * Retrieve rxdp overflow
- */
-#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
-/* flags */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
-#define        MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
-
-/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
-#define    MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
-/* Array of stats */
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
-#define       MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
-/* class */
-#define       MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
-
-
-/***********************************/
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
- * Explicitly collect class stats at the specified evb port
- */
-#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
-/* The port id associated with the vport/pport at which to collect class stats
- */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
-
-/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
-#define    MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
-/* super_class */
-#define       MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
-
-
 /***********************************/
 /* MC_CMD_GET_CLOCK
  * Return the system and PDCPU clock frequencies.
 #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SET_CLOCK_IN msgrequest */
-#define    MC_CMD_SET_CLOCK_IN_LEN 12
-/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define    MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
 #define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
-/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the system clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for inter-core clock domain */
 #define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
-/* Request DPCPU frequency in MHz; 0 leaves unchanged. */
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for DPCPU clock domain */
 #define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for MC clock domain */
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE  0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE  0x0
 
 /* MC_CMD_SET_CLOCK_OUT msgresponse */
-#define    MC_CMD_SET_CLOCK_OUT_LEN 12
+#define    MC_CMD_SET_CLOCK_OUT_LEN 28
 /* Resulting system frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED  0x0
 /* Resulting inter-core frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED  0x0
 /* Resulting DPCPU frequency in MHz */
 #define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED  0x0
+/* Resulting PCS frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED  0x0
+/* Resulting MC frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED  0x0
+/* Resulting rmon frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED  0x0
+/* Resulting vswitch frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED  0x0
 
 
 /***********************************/
 /* MC_CMD_DPCPU_RPC_IN msgrequest */
 #define    MC_CMD_DPCPU_RPC_IN_LEN 36
 #define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
-/* enum: RxDPCPU */
-#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x0
+/* enum: RxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0  0x0
 /* enum: TxDPCPU0 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX0  0x1
 /* enum: TxDPCPU1 */
 #define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX1  0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX1   0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX   0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX   0x81
 /* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
  * initialised to zero
  */
 #define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
 
 
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define    MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA  0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define    MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
 /***********************************/
 /* MC_CMD_CAP_BLK_READ
  * Read multiple 64bit words from capture block memory
  * more data is returned.
  */
 #define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT  0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define          MC_CMD_KR_TUNE_IN_READ_FOM  0x7
 /* Align the arguments to 32 bits */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15) */
+/* enum: Attenuation (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT  0x0
-/* enum: CTLE Boost (0-15) */
+/* enum: CTLE Boost (0-15, TBD for Medford) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST  0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
+ * for Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1  0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2  0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3  0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4  0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
+ * Medford)
+ */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5  0x6
+/* enum: Edge DFE DLEV (TBD for Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV  0x7
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY  0x7
 /* enum: TX Slew Rate Fine control */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET  0x8
+/* enum: TX Termination Impedance control */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET  0x9
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0  0x0 /* enum */
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
 
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
 
 /***********************************/
 /* MC_CMD_PCIE_TUNE
 #define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
 /* enum: validate application */
 #define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE  0x0
+/* enum: mask application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_MASK  0x1
 /* arguments specific to this particular operation */
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
 #define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
 
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_SET_PORT_SNIFF_CONFIG
- * Configure port sniffing for the physical port associated with the calling
+ * Configure RX port sniffing for the physical port associated with the calling
  * function. Only a privileged function may change the port sniffing
  * configuration. A copy of all traffic delivered to the host (non-promiscuous
  * mode) or all traffic arriving at the port (promiscuous mode) may be
 
 /***********************************/
 /* MC_CMD_GET_PORT_SNIFF_CONFIG
- * Obtain the current port sniffing configuration for the physical port
+ * Obtain the current RX port sniffing configuration for the physical port
  * associated with the calling function. Only a privileged function may read
  * the configuration.
  */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
 
 
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN  0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX  0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE  0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE  0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS  0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define    MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define    MC_CMD_READ_ATB_IN_LEN 16
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define          MC_CMD_READ_ATB_IN_BUS_CCOM  0x0 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CKR  0x1 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CPCIE  0x8 /* enum */
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define    MC_CMD_READ_ATB_OUT_LEN 4
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL  0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN             0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK              0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD            0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP               0x8 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS  0x10 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING      0x20 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST           0x40 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST         0x80 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST         0x100 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST     0x200 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS       0x400 /* enum */
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE             0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO       0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP         0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN       0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE         0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define    MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE       0x0 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL        0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY   0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY   0x3 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF  0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE        0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK            0x0 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128   0x1 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256   0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID          0xff /* enum */
+/* Sector data */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space available). If 0, only one write attempt is
+ * made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
+ * into MCDI response)
+ */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define    MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define    MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
 #endif /* MCDI_PCOL_H */
index 47d1e3a96522668a1cf1c80cacd141d2afbf1193..4d35313a239db77d0d90a273e1d02aa6dfaabd81 100644 (file)
@@ -925,6 +925,7 @@ struct vfdi_status;
  * @stats_lock: Statistics update lock. Must be held when calling
  *     efx_nic_type::{update,start,stop}_stats.
  * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ * @mc_promisc: Whether in multicast promiscuous mode when last changed
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -1072,6 +1073,7 @@ struct efx_nic {
        int last_irq_cpu;
        spinlock_t stats_lock;
        atomic_t n_rx_noskb_drops;
+       bool mc_promisc;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
index 31ff9084d9a46624d3a2350a10fdddfc18bbfc08..0b536e27d3b2291f0af62c4d51b005b0cfec8a72 100644 (file)
@@ -506,6 +506,7 @@ enum {
  * @rx_rss_context_exclusive: Whether our RSS context is exclusive or shared
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
  * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
  *     after MC reboot
  * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
@@ -535,6 +536,7 @@ struct efx_ef10_nic_data {
        bool rx_rss_context_exclusive;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
+       bool workaround_26807;
        bool must_check_datapath_caps;
        u32 datapath_caps;
        unsigned int rx_dpcpu_fw_id;
index b605dfd5c7bc7146600908b1c7a40f79b28dfd53..9d78830da6097ff1e6a42873f580734c7f970180 100644 (file)
@@ -114,7 +114,10 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
 
        if (efx->type->test_nvram) {
                rc = efx->type->test_nvram(efx);
-               tests->nvram = rc ? -1 : 1;
+               if (rc == -EPERM)
+                       rc = 0;
+               else
+                       tests->nvram = rc ? -1 : 1;
        }
 
        return rc;
@@ -253,6 +256,12 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
        mutex_lock(&efx->mac_lock);
        rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
        mutex_unlock(&efx->mac_lock);
+       if (rc == -EPERM)
+               rc = 0;
+       else
+               netif_info(efx, drv, efx->net_dev,
+                          "%s phy selftest\n", rc ? "Failed" : "Passed");
+
        return rc;
 }
 
@@ -661,6 +670,9 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
        wmb();
        kfree(state);
 
+       if (rc == -EPERM)
+               rc = 0;
+
        return rc;
 }
 
index b323b9167526f6f48da1e13da8fbc54fd4daa9e5..b2f886d9042976956aee1f2d7c8ba84751ae7de4 100644 (file)
@@ -1042,9 +1042,5 @@ const struct efx_nic_type siena_a0_nic_type = {
        .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
        .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
                             1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
-                            1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC |
-                            1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC |
-                            1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ),
+                            1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
 };
index e817a1a4437927d8976fbfe64c4408f5c0fc5c52..b1e5f24708c923d5b9f4f54924bc1f8c34f64496 100644 (file)
 #include "stmmac.h"
 #include "stmmac_platform.h"
 
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       if (pdev->dev.of_node) {
+               plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+               if (IS_ERR(plat_dat)) {
+                       dev_err(&pdev->dev, "dt configuration failed\n");
+                       return PTR_ERR(plat_dat);
+               }
+       } else {
+               plat_dat = dev_get_platdata(&pdev->dev);
+               if (!plat_dat) {
+                       dev_err(&pdev->dev, "no platform data provided\n");
+                       return  -EINVAL;
+               }
+
+               /* Set default value for multicast hash bins */
+               plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+               /* Set default value for unicast filter entries */
+               plat_dat->unicast_filter_entries = 1;
+       }
+
+       /* Custom initialisation (if needed) */
+       if (plat_dat->init) {
+               ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+               if (ret)
+                       return ret;
+       }
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
+
 static const struct of_device_id dwmac_generic_match[] = {
        { .compatible = "st,spear600-gmac"},
        { .compatible = "snps,dwmac-3.610"},
@@ -27,7 +67,7 @@ static const struct of_device_id dwmac_generic_match[] = {
 MODULE_DEVICE_TABLE(of, dwmac_generic_match);
 
 static struct platform_driver dwmac_generic_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = dwmac_generic_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = STMMAC_RESOURCE_NAME,
index 7e3129e7f143a9990c89780a8e6638f8182e4892..333489f0fd24d80ec5d09584b4cbb489eb1a880d 100644 (file)
@@ -248,23 +248,40 @@ static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
        return NULL;
 }
 
-static void *ipq806x_gmac_setup(struct platform_device *pdev)
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+       struct ipq806x_gmac *gmac = priv;
+
+       ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct device *dev = &pdev->dev;
        struct ipq806x_gmac *gmac;
        int val;
        void *err;
 
+       val = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (val)
+               return val;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
        gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
        if (!gmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        gmac->pdev = pdev;
 
        err = ipq806x_gmac_of_parse(gmac);
-       if (err) {
+       if (IS_ERR(err)) {
                dev_err(dev, "device tree parsing error\n");
-               return err;
+               return PTR_ERR(err);
        }
 
        regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -285,7 +302,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
        default:
                dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
                        phy_modes(gmac->phy_mode));
-               return NULL;
+               return -EINVAL;
        }
        regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
 
@@ -304,7 +321,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
        default:
                dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
                        phy_modes(gmac->phy_mode));
-               return NULL;
+               return -EINVAL;
        }
        regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
 
@@ -327,30 +344,21 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
                             0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
        }
 
-       return gmac;
-}
+       plat_dat->has_gmac = true;
+       plat_dat->bsp_priv = gmac;
+       plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
 
-static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
-{
-       struct ipq806x_gmac *gmac = priv;
-
-       ipq806x_gmac_set_speed(gmac, speed);
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data ipq806x_gmac_data = {
-       .has_gmac       = 1,
-       .setup          = ipq806x_gmac_setup,
-       .fix_mac_speed  = ipq806x_gmac_fix_mac_speed,
-};
-
 static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
-       { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+       { .compatible = "qcom,ipq806x-gmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
 
 static struct platform_driver ipq806x_gmac_dwmac_driver = {
-       .probe = stmmac_pltfr_probe,
+       .probe = ipq806x_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "ipq806x-gmac-dwmac",
index cb888d3ebbdc3054732bdcfc8b17f8179e8f78fa..78e9d1861896335d86ac31e5701856d9bfe8779b 100644 (file)
 # define LPC18XX_CREG_CREG6_ETHMODE_MII                0x0
 # define LPC18XX_CREG_CREG6_ETHMODE_RMII       0x4
 
-struct lpc18xx_dwmac_priv_data {
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct regmap *reg;
-       int interface;
-};
+       u8 ethmode;
+       int ret;
 
-static void *lpc18xx_dwmac_setup(struct platform_device *pdev)
-{
-       struct lpc18xx_dwmac_priv_data *dwmac;
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
 
-       dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
-       dwmac->interface = of_get_phy_mode(pdev->dev.of_node);
-       if (dwmac->interface < 0)
-               return ERR_PTR(dwmac->interface);
+       plat_dat->has_gmac = true;
 
-       dwmac->reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
-       if (IS_ERR(dwmac->reg)) {
-               dev_err(&pdev->dev, "Syscon lookup failed\n");
-               return dwmac->reg;
+       reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+       if (IS_ERR(reg)) {
+               dev_err(&pdev->dev, "syscon lookup failed\n");
+               return PTR_ERR(reg);
        }
 
-       return dwmac;
-}
-
-static int lpc18xx_dwmac_init(struct platform_device *pdev, void *priv)
-{
-       struct lpc18xx_dwmac_priv_data *dwmac = priv;
-       u8 ethmode;
-
-       if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+       if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
                ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
-       } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+       } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
                ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
        } else {
                dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
                return -EINVAL;
        }
 
-       regmap_update_bits(dwmac->reg, LPC18XX_CREG_CREG6,
+       regmap_update_bits(reg, LPC18XX_CREG_CREG6,
                           LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
 
-       return 0;
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data lpc18xx_dwmac_data = {
-       .has_gmac = 1,
-       .setup = lpc18xx_dwmac_setup,
-       .init = lpc18xx_dwmac_init,
-};
-
 static const struct of_device_id lpc18xx_dwmac_match[] = {
-       { .compatible = "nxp,lpc1850-dwmac", .data = &lpc18xx_dwmac_data },
+       { .compatible = "nxp,lpc1850-dwmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
 
 static struct platform_driver lpc18xx_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = lpc18xx_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "lpc18xx-dwmac",
index 61a324a87d09e0b6f06873ce35131adbfbf9826d..c1bac1912b37189d85510fa6648e6e6271c8b0d1 100644 (file)
@@ -47,36 +47,45 @@ static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
        writel(val, dwmac->reg);
 }
 
-static void *meson6_dwmac_setup(struct platform_device *pdev)
+static int meson6_dwmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
        struct meson_dwmac *dwmac;
        struct resource *res;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
 
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
        if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dwmac->reg))
-               return ERR_CAST(dwmac->reg);
+               return PTR_ERR(dwmac->reg);
 
-       return dwmac;
-}
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
 
-static const struct stmmac_of_data meson6_dwmac_data = {
-       .setup          = meson6_dwmac_setup,
-       .fix_mac_speed  = meson6_dwmac_fix_mac_speed,
-};
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id meson6_dwmac_match[] = {
-       { .compatible = "amlogic,meson6-dwmac", .data = &meson6_dwmac_data},
+       { .compatible = "amlogic,meson6-dwmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
 
 static struct platform_driver meson6_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = meson6_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "meson6-dwmac",
index 00a1e1e09d4f33bb1cd9dec333691e746dadb1ff..11baa4b197793f583eba9a5dc5c53aefb145ff9b 100644 (file)
@@ -46,7 +46,7 @@ struct rk_priv_data {
        struct platform_device *pdev;
        int phy_iface;
        struct regulator *regulator;
-       struct rk_gmac_ops *ops;
+       const struct rk_gmac_ops *ops;
 
        bool clk_enabled;
        bool clock_input;
@@ -177,7 +177,7 @@ static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
        }
 }
 
-struct rk_gmac_ops rk3288_ops = {
+static const struct rk_gmac_ops rk3288_ops = {
        .set_to_rgmii = rk3288_set_to_rgmii,
        .set_to_rmii = rk3288_set_to_rmii,
        .set_rgmii_speed = rk3288_set_rgmii_speed,
@@ -289,7 +289,7 @@ static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
        }
 }
 
-struct rk_gmac_ops rk3368_ops = {
+static const struct rk_gmac_ops rk3368_ops = {
        .set_to_rgmii = rk3368_set_to_rgmii,
        .set_to_rmii = rk3368_set_to_rmii,
        .set_rgmii_speed = rk3368_set_rgmii_speed,
@@ -448,7 +448,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
 }
 
 static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
-                                         struct rk_gmac_ops *ops)
+                                         const struct rk_gmac_ops *ops)
 {
        struct rk_priv_data *bsp_priv;
        struct device *dev = &pdev->dev;
@@ -529,16 +529,6 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
        return bsp_priv;
 }
 
-static void *rk3288_gmac_setup(struct platform_device *pdev)
-{
-       return rk_gmac_setup(pdev, &rk3288_ops);
-}
-
-static void *rk3368_gmac_setup(struct platform_device *pdev)
-{
-       return rk_gmac_setup(pdev, &rk3368_ops);
-}
-
 static int rk_gmac_init(struct platform_device *pdev, void *priv)
 {
        struct rk_priv_data *bsp_priv = priv;
@@ -576,31 +566,52 @@ static void rk_fix_speed(void *priv, unsigned int speed)
                dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
 }
 
-static const struct stmmac_of_data rk3288_gmac_data = {
-       .has_gmac = 1,
-       .fix_mac_speed = rk_fix_speed,
-       .setup = rk3288_gmac_setup,
-       .init = rk_gmac_init,
-       .exit = rk_gmac_exit,
-};
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       const struct rk_gmac_ops *data;
+       int ret;
 
-static const struct stmmac_of_data rk3368_gmac_data = {
-       .has_gmac = 1,
-       .fix_mac_speed = rk_fix_speed,
-       .setup = rk3368_gmac_setup,
-       .init = rk_gmac_init,
-       .exit = rk_gmac_exit,
-};
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "no of match data provided\n");
+               return -EINVAL;
+       }
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       plat_dat->has_gmac = true;
+       plat_dat->init = rk_gmac_init;
+       plat_dat->exit = rk_gmac_exit;
+       plat_dat->fix_mac_speed = rk_fix_speed;
+
+       plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
+       if (IS_ERR(plat_dat->bsp_priv))
+               return PTR_ERR(plat_dat->bsp_priv);
+
+       ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
-       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_gmac_data},
-       { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_gmac_data},
+       { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+       { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
        { }
 };
 MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
 
 static struct platform_driver rk_gmac_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = rk_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "rk_gmac-dwmac",
index 8141c5b844ae681160fbf44b69e37f359492db85..401383b252a8f079aba4688afd6af1a53726b962 100644 (file)
@@ -175,31 +175,6 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
        return 0;
 }
 
-static void *socfpga_dwmac_probe(struct platform_device *pdev)
-{
-       struct device           *dev = &pdev->dev;
-       int                     ret;
-       struct socfpga_dwmac    *dwmac;
-
-       dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return ERR_PTR(-ENOMEM);
-
-       ret = socfpga_dwmac_parse_data(dwmac, dev);
-       if (ret) {
-               dev_err(dev, "Unable to parse OF data\n");
-               return ERR_PTR(ret);
-       }
-
-       ret = socfpga_dwmac_setup(dwmac);
-       if (ret) {
-               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-               return ERR_PTR(ret);
-       }
-
-       return dwmac;
-}
-
 static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
 {
        struct socfpga_dwmac    *dwmac = priv;
@@ -257,21 +232,58 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
        return ret;
 }
 
-static const struct stmmac_of_data socfpga_gmac_data = {
-       .setup = socfpga_dwmac_probe,
-       .init = socfpga_dwmac_init,
-       .exit = socfpga_dwmac_exit,
-       .fix_mac_speed = socfpga_dwmac_fix_mac_speed,
-};
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct device           *dev = &pdev->dev;
+       int                     ret;
+       struct socfpga_dwmac    *dwmac;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+       if (!dwmac)
+               return -ENOMEM;
+
+       ret = socfpga_dwmac_parse_data(dwmac, dev);
+       if (ret) {
+               dev_err(dev, "Unable to parse OF data\n");
+               return ret;
+       }
+
+       ret = socfpga_dwmac_setup(dwmac);
+       if (ret) {
+               dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
+               return ret;
+       }
+
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->init = socfpga_dwmac_init;
+       plat_dat->exit = socfpga_dwmac_exit;
+       plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+       ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id socfpga_dwmac_match[] = {
-       { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gmac_data },
+       { .compatible = "altr,socfpga-stmmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
 
 static struct platform_driver socfpga_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = socfpga_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "socfpga-dwmac",
index a2e8111c5d14302ffafb6f7fcd9db9a3db7e00e3..7f6f4a4fcc708973af0aa48418bedaad984ef5fd 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/regmap.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_net.h>
 
 #include "stmmac_platform.h"
@@ -128,6 +129,11 @@ struct sti_dwmac {
        struct device *dev;
        struct regmap *regmap;
        u32 speed;
+       void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+       void (*fix_retime_src)(void *priv, unsigned int speed);
 };
 
 static u32 phy_intf_sels[] = {
@@ -222,8 +228,9 @@ static void stid127_fix_retime_src(void *priv, u32 spd)
        regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
 }
 
-static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct sti_dwmac *dwmac = priv;
        struct regmap *regmap = dwmac->regmap;
        int iface = dwmac->interface;
        struct device *dev = dwmac->dev;
@@ -241,28 +248,8 @@ static void sti_dwmac_ctrl_init(struct sti_dwmac *dwmac)
 
        val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
        regmap_update_bits(regmap, reg, ENMII_MASK, val);
-}
-
-static int stix4xx_init(struct platform_device *pdev, void *priv)
-{
-       struct sti_dwmac *dwmac = priv;
-       u32 spd = dwmac->speed;
-
-       sti_dwmac_ctrl_init(dwmac);
-
-       stih4xx_fix_retime_src(priv, spd);
-
-       return 0;
-}
 
-static int stid127_init(struct platform_device *pdev, void *priv)
-{
-       struct sti_dwmac *dwmac = priv;
-       u32 spd = dwmac->speed;
-
-       sti_dwmac_ctrl_init(dwmac);
-
-       stid127_fix_retime_src(priv, spd);
+       dwmac->fix_retime_src(priv, dwmac->speed);
 
        return 0;
 }
@@ -334,36 +321,58 @@ static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
        return 0;
 }
 
-static void *sti_dwmac_setup(struct platform_device *pdev)
+static int sti_dwmac_probe(struct platform_device *pdev)
 {
+       struct plat_stmmacenet_data *plat_dat;
+       const struct sti_dwmac_of_data *data;
+       struct stmmac_resources stmmac_res;
        struct sti_dwmac *dwmac;
        int ret;
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data) {
+               dev_err(&pdev->dev, "No OF match data provided\n");
+               return -EINVAL;
+       }
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
        if (!dwmac)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        ret = sti_dwmac_parse_data(dwmac, pdev);
        if (ret) {
                dev_err(&pdev->dev, "Unable to parse OF data\n");
-               return ERR_PTR(ret);
+               return ret;
        }
 
-       return dwmac;
+       dwmac->fix_retime_src = data->fix_retime_src;
+
+       plat_dat->bsp_priv = dwmac;
+       plat_dat->init = sti_dwmac_init;
+       plat_dat->exit = sti_dwmac_exit;
+       plat_dat->fix_mac_speed = data->fix_retime_src;
+
+       ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
 }
 
-static const struct stmmac_of_data stih4xx_dwmac_data = {
-       .fix_mac_speed = stih4xx_fix_retime_src,
-       .setup = sti_dwmac_setup,
-       .init = stix4xx_init,
-       .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+       .fix_retime_src = stih4xx_fix_retime_src,
 };
 
-static const struct stmmac_of_data stid127_dwmac_data = {
-       .fix_mac_speed = stid127_fix_retime_src,
-       .setup = sti_dwmac_setup,
-       .init = stid127_init,
-       .exit = sti_dwmac_exit,
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+       .fix_retime_src = stid127_fix_retime_src,
 };
 
 static const struct of_device_id sti_dwmac_match[] = {
@@ -376,7 +385,7 @@ static const struct of_device_id sti_dwmac_match[] = {
 MODULE_DEVICE_TABLE(of, sti_dwmac_match);
 
 static struct platform_driver sti_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = sti_dwmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "sti-dwmac",
index 15048ca397591acb90f746a0e3b7bfbf5b9c9e86..52b8ed9bd87c2e20707c41ab0eaac247de26afce 100644 (file)
@@ -33,35 +33,6 @@ struct sunxi_priv_data {
        struct regulator *regulator;
 };
 
-static void *sun7i_gmac_setup(struct platform_device *pdev)
-{
-       struct sunxi_priv_data *gmac;
-       struct device *dev = &pdev->dev;
-
-       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
-       if (!gmac)
-               return ERR_PTR(-ENOMEM);
-
-       gmac->interface = of_get_phy_mode(dev->of_node);
-
-       gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
-       if (IS_ERR(gmac->tx_clk)) {
-               dev_err(dev, "could not get tx clock\n");
-               return gmac->tx_clk;
-       }
-
-       /* Optional regulator for PHY */
-       gmac->regulator = devm_regulator_get_optional(dev, "phy");
-       if (IS_ERR(gmac->regulator)) {
-               if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
-                       return ERR_PTR(-EPROBE_DEFER);
-               dev_info(dev, "no regulator found\n");
-               gmac->regulator = NULL;
-       }
-
-       return gmac;
-}
-
 #define SUN7I_GMAC_GMII_RGMII_RATE     125000000
 #define SUN7I_GMAC_MII_RATE            25000000
 
@@ -132,25 +103,67 @@ static void sun7i_fix_speed(void *priv, unsigned int speed)
        }
 }
 
-/* of_data specifying hardware features and callbacks.
- * hardware features were copied from Allwinner drivers. */
-static const struct stmmac_of_data sun7i_gmac_data = {
-       .has_gmac = 1,
-       .tx_coe = 1,
-       .fix_mac_speed = sun7i_fix_speed,
-       .setup = sun7i_gmac_setup,
-       .init = sun7i_gmac_init,
-       .exit = sun7i_gmac_exit,
-};
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct sunxi_priv_data *gmac;
+       struct device *dev = &pdev->dev;
+       int ret;
+
+       ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+       if (ret)
+               return ret;
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+       if (!gmac)
+               return -ENOMEM;
+
+       gmac->interface = of_get_phy_mode(dev->of_node);
+
+       gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+       if (IS_ERR(gmac->tx_clk)) {
+               dev_err(dev, "could not get tx clock\n");
+               return PTR_ERR(gmac->tx_clk);
+       }
+
+       /* Optional regulator for PHY */
+       gmac->regulator = devm_regulator_get_optional(dev, "phy");
+       if (IS_ERR(gmac->regulator)) {
+               if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_info(dev, "no regulator found\n");
+               gmac->regulator = NULL;
+       }
+
+       /* Platform data specifying hardware features and callbacks.
+        * Hardware features were copied from Allwinner drivers.
+        */
+       plat_dat->tx_coe = 1;
+       plat_dat->has_gmac = true;
+       plat_dat->bsp_priv = gmac;
+       plat_dat->init = sun7i_gmac_init;
+       plat_dat->exit = sun7i_gmac_exit;
+       plat_dat->fix_mac_speed = sun7i_fix_speed;
+
+       ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+       if (ret)
+               return ret;
+
+       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+}
 
 static const struct of_device_id sun7i_dwmac_match[] = {
-       { .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
+       { .compatible = "allwinner,sun7i-a20-gmac" },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
 
 static struct platform_driver sun7i_dwmac_driver = {
-       .probe  = stmmac_pltfr_probe,
+       .probe  = sun7i_gmac_probe,
        .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "sun7i-dwmac",
index bcdc8955c71945cf62e2d62e138d17a16f937949..d02691ba3d7feb15ec7e783db7d9086924a6f7fc 100644 (file)
@@ -104,32 +104,16 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
  * this function reads the driver parameters from the device tree and
  * sets some private fields that will be used by the main driver at runtime.
  */
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-                                 struct plat_stmmacenet_data *plat,
-                                 const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
        struct device_node *np = pdev->dev.of_node;
+       struct plat_stmmacenet_data *plat;
        struct stmmac_dma_cfg *dma_cfg;
-       const struct of_device_id *device;
-       struct device *dev = &pdev->dev;
-
-       device = of_match_device(dev->driver->of_match_table, dev);
-       if (device->data) {
-               const struct stmmac_of_data *data = device->data;
-               plat->has_gmac = data->has_gmac;
-               plat->enh_desc = data->enh_desc;
-               plat->tx_coe = data->tx_coe;
-               plat->rx_coe = data->rx_coe;
-               plat->bugged_jumbo = data->bugged_jumbo;
-               plat->pmt = data->pmt;
-               plat->riwt_off = data->riwt_off;
-               plat->fix_mac_speed = data->fix_mac_speed;
-               plat->bus_setup = data->bus_setup;
-               plat->setup = data->setup;
-               plat->free = data->free;
-               plat->init = data->init;
-               plat->exit = data->exit;
-       }
+
+       plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+       if (!plat)
+               return ERR_PTR(-ENOMEM);
 
        *mac = of_get_mac_address(np);
        plat->interface = of_get_phy_mode(np);
@@ -151,7 +135,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
        /* If phy-handle is not specified, check if we have a fixed-phy */
        if (!plat->phy_node && of_phy_is_fixed_link(np)) {
                if ((of_phy_register_fixed_link(np) < 0))
-                       return -ENODEV;
+                       return ERR_PTR(-ENODEV);
 
                plat->phy_node = of_node_get(np);
        }
@@ -182,6 +166,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
+
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -222,7 +212,7 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                                       GFP_KERNEL);
                if (!dma_cfg) {
                        of_node_put(np);
-                       return -ENOMEM;
+                       return ERR_PTR(-ENOMEM);
                }
                plat->dma_cfg = dma_cfg;
                of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
@@ -240,44 +230,34 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
        }
 
-       return 0;
+       return plat;
 }
 #else
-static int stmmac_probe_config_dt(struct platform_device *pdev,
-                                 struct plat_stmmacenet_data *plat,
-                                 const char **mac)
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
-       return -ENOSYS;
+       return ERR_PTR(-ENOSYS);
 }
 #endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
 
-/**
- * stmmac_pltfr_probe - platform driver probe.
- * @pdev: platform device pointer
- * Description: platform_device probe function. It is to allocate
- * the necessary platform resources, invoke custom helper (if required) and
- * invoke the main probe function.
- */
-int stmmac_pltfr_probe(struct platform_device *pdev)
+int stmmac_get_platform_resources(struct platform_device *pdev,
+                                 struct stmmac_resources *stmmac_res)
 {
-       struct stmmac_resources stmmac_res;
-       int ret = 0;
        struct resource *res;
-       struct device *dev = &pdev->dev;
-       struct plat_stmmacenet_data *plat_dat = NULL;
 
-       memset(&stmmac_res, 0, sizeof(stmmac_res));
+       memset(stmmac_res, 0, sizeof(*stmmac_res));
 
        /* Get IRQ information early so that we can ask for deferred
         * probe if needed before we go too far with resource allocation.
         */
-       stmmac_res.irq = platform_get_irq_byname(pdev, "macirq");
-       if (stmmac_res.irq < 0) {
-               if (stmmac_res.irq != -EPROBE_DEFER) {
-                       dev_err(dev,
+       stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+       if (stmmac_res->irq < 0) {
+               if (stmmac_res->irq != -EPROBE_DEFER) {
+                       dev_err(&pdev->dev,
                                "MAC IRQ configuration information not found\n");
                }
-               return stmmac_res.irq;
+               return stmmac_res->irq;
        }
 
        /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
@@ -287,64 +267,23 @@ int stmmac_pltfr_probe(struct platform_device *pdev)
         * If the wake up interrupt is not passed from the platform,
         * the driver will continue to use the mac irq (ndev->irq)
         */
-       stmmac_res.wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
-       if (stmmac_res.wol_irq < 0) {
-               if (stmmac_res.wol_irq == -EPROBE_DEFER)
+       stmmac_res->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+       if (stmmac_res->wol_irq < 0) {
+               if (stmmac_res->wol_irq == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
-               stmmac_res.wol_irq = stmmac_res.irq;
+               stmmac_res->wol_irq = stmmac_res->irq;
        }
 
-       stmmac_res.lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
-       if (stmmac_res.lpi_irq == -EPROBE_DEFER)
+       stmmac_res->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+       if (stmmac_res->lpi_irq == -EPROBE_DEFER)
                return -EPROBE_DEFER;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       stmmac_res.addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(stmmac_res.addr))
-               return PTR_ERR(stmmac_res.addr);
-
-       plat_dat = dev_get_platdata(&pdev->dev);
-
-       if (!plat_dat)
-               plat_dat = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct plat_stmmacenet_data),
-                                       GFP_KERNEL);
-       if (!plat_dat) {
-               pr_err("%s: ERROR: no memory", __func__);
-               return  -ENOMEM;
-       }
-
-       /* Set default value for multicast hash bins */
-       plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-       /* Set default value for unicast filter entries */
-       plat_dat->unicast_filter_entries = 1;
-
-       if (pdev->dev.of_node) {
-               ret = stmmac_probe_config_dt(pdev, plat_dat, &stmmac_res.mac);
-               if (ret) {
-                       pr_err("%s: main dt probe failed", __func__);
-                       return ret;
-               }
-       }
+       stmmac_res->addr = devm_ioremap_resource(&pdev->dev, res);
 
-       /* Custom setup (if needed) */
-       if (plat_dat->setup) {
-               plat_dat->bsp_priv = plat_dat->setup(pdev);
-               if (IS_ERR(plat_dat->bsp_priv))
-                       return PTR_ERR(plat_dat->bsp_priv);
-       }
-
-       /* Custom initialisation (if needed)*/
-       if (plat_dat->init) {
-               ret = plat_dat->init(pdev, plat_dat->bsp_priv);
-               if (unlikely(ret))
-                       return ret;
-       }
-
-       return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       return PTR_ERR_OR_ZERO(stmmac_res->addr);
 }
-EXPORT_SYMBOL_GPL(stmmac_pltfr_probe);
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
 
 /**
  * stmmac_pltfr_remove
@@ -361,9 +300,6 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
        if (priv->plat->exit)
                priv->plat->exit(pdev, priv->plat->bsp_priv);
 
-       if (priv->plat->free)
-               priv->plat->free(pdev, priv->plat->bsp_priv);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
index 71da86d7bd00d9f7feb0a501cce0e6bbb4cb7298..ffeb8d9e2b2ef9f55f5b788d1853c208cf38fde4 100644 (file)
 #ifndef __STMMAC_PLATFORM_H__
 #define __STMMAC_PLATFORM_H__
 
-int stmmac_pltfr_probe(struct platform_device *pdev);
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, const char **mac);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+                                 struct stmmac_resources *stmmac_res);
+
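+/*
+ * The two helpers above replace the old stmmac_pltfr_probe()/stmmac_of_data
+ * setup callbacks. A converted glue driver's probe follows this pattern
+ * (sketch; "foo" stands for the driver-private context):
+ *
+ *	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ *	if (ret)
+ *		return ret;
+ *	plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ *	if (IS_ERR(plat_dat))
+ *		return PTR_ERR(plat_dat);
+ *	plat_dat->bsp_priv = foo;
+ *	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ */
+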
 int stmmac_pltfr_remove(struct platform_device *pdev);
 extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
 
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
new file mode 100644 (file)
index 0000000..a8f3151
--- /dev/null
@@ -0,0 +1,27 @@
+#
+# Synopsys network device configuration
+#
+
+config NET_VENDOR_SYNOPSYS
+       bool "Synopsys devices"
+       default y
+       ---help---
+         If you have a network (Ethernet) device belonging to this class, say Y.
+
+         Note that the answer to this question doesn't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about Synopsys devices. If you say Y, you will be asked
+         for your specific device in the following questions.
+
+if NET_VENDOR_SYNOPSYS
+
+config SYNOPSYS_DWC_ETH_QOS
+       tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
+       select PHYLIB
+       select CRC32
+       select MII
+       depends on OF
+       ---help---
+         This driver supports the DWC Ethernet QoS IP from Synopsys.
+
+endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
new file mode 100644 (file)
index 0000000..7a37572
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the Synopsys network device drivers.
+#
+
+obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
new file mode 100644 (file)
index 0000000..85b3326
--- /dev/null
@@ -0,0 +1,3019 @@
+/*  Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
+ *  This version introduced many changes which break backwards
+ *  compatibility with the non-QoS IP from Synopsys (used in the ST Micro
+ *  drivers). Some fields differ between version 4.00a and 4.10a, mainly the
+ *  interrupt bit fields. The driver could be made compatible with 4.00a if
+ *  all relevant HW errata are handled.
+ *
+ *  The GMAC is highly configurable at synthesis time. This driver has been
+ *  developed for a subset of the total available feature set. Currently
+ *  it supports:
+ *  - TSO
+ *  - Checksum offload for RX and TX.
+ *  - Energy-efficient Ethernet (EEE).
+ *  - GMII phy interface.
+ *  - The statistics module.
+ *  - Single RX and TX queue.
+ *
+ *  Copyright (C) 2015 Axis Communications AB.
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ethtool.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+
+#include <linux/phy.h>
+#include <linux/mii.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/version.h>
+
+#include <linux/device.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/tcp.h>
+
+#define DRIVER_NAME                    "dwceqos"
+#define DRIVER_DESCRIPTION             "Synopsys DWC Ethernet QoS driver"
+#define DRIVER_VERSION                 "0.9"
+
+#define DWCEQOS_MSG_DEFAULT    (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+       NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
+
+#define DWCEQOS_LPI_TIMER_MIN      8
+#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
+
+#define DWCEQOS_RX_BUF_SIZE 2048
+
+#define DWCEQOS_RX_DCNT 256
+#define DWCEQOS_TX_DCNT 256
+
+#define DWCEQOS_HASH_TABLE_SIZE 64
+
+/* The size field in the DMA descriptor is 14 bits */
+#define BYTES_PER_DMA_DESC 16376
+
+/* Hardware registers */
+#define START_MAC_REG_OFFSET    0x0000
+#define MAX_MAC_REG_OFFSET      0x0bd0
+#define START_MTL_REG_OFFSET    0x0c00
+#define MAX_MTL_REG_OFFSET      0x0d7c
+#define START_DMA_REG_OFFSET    0x1000
+#define MAX_DMA_REG_OFFSET      0x117C
+
+#define REG_SPACE_SIZE          0x1800
+
+/* DMA */
+#define REG_DWCEQOS_DMA_MODE             0x1000
+#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
+#define REG_DWCEQOS_DMA_IS               0x1008
+#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
+
+/* DMA channel registers */
+#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
+#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
+#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
+#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
+#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
+#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
+#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
+#define REG_DWCEQOS_DMA_CH0_IE           0x1134
+#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
+#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
+#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
+#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG    0x115c
+#define REG_DWCEQOS_DMA_CH0_STA          0x1160
+
+#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
+#define DWCEQOS_DMA_MODE_DA              BIT(1)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
+#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
+#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
+       (((x) << 16) & 0x000F0000)
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
+       (((x) << 24) & 0x0F000000)
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
+#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
+
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
+       (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
+#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
+
+#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
+#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
+
+#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
+#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
+#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
+#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
+#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
+
+#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
+#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
+#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
+#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
+#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
+#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
+
+#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
+#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
+#define DWCEQOS_DMA_IS_MACIS             BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
+#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
+#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
+#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
+#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
+#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
+
+#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
+#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
+
+#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
+#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
+
+/* DMA descriptor bits for RX normal descriptor (read format) */
+#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
+#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
+#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
+
+/* DMA descriptor bits for RX normal descriptor (write back format) */
+#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
+#define DWCEQOS_DMA_RDES3_ES      BIT(15)
+#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
+#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
+#define DWCEQOS_DMA_RDES1_PT      0x00000007
+#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
+#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
+#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
+
+/* DMA descriptor bits for TX normal descriptor (read format) */
+#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
+#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
+#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
+#define DWCEQOS_DMA_TDES3_FD      BIT(29)
+#define DWCEQOS_DMA_TDES3_LD      BIT(28)
+#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
+#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
+#define DWCEQOS_DMA_TDES3_CA      0x00030000
+#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
+#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
+#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
+
+#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
+
+/* DMA channel states */
+#define DMA_TX_CH_STOPPED   0
+#define DMA_TX_CH_SUSPENDED 6
+
+#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
+
+/* MTL */
+#define REG_DWCEQOS_MTL_OPER             0x0c00
+#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
+#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
+#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
+
+#define REG_DWCEQOS_MTL_IS               0x0c20
+#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
+#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
+#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
+#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
+
+#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
+
+#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
+
+#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
+#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
+#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
+#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
+
+#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
+
+#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
+#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
+#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
+#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
+#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
+#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
+#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
+
+/* MAC */
+#define REG_DWCEQOS_MAC_CFG              0x0000
+#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
+#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
+#define REG_DWCEQOS_MAC_WD_TO            0x000c
+#define REG_DWCEQOS_HASTABLE_LO          0x0010
+#define REG_DWCEQOS_HASTABLE_HI          0x0014
+#define REG_DWCEQOS_MAC_IS               0x00b0
+#define REG_DWCEQOS_MAC_IE               0x00b4
+#define REG_DWCEQOS_MAC_STAT             0x00b8
+#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
+#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
+#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
+#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
+#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
+#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
+#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
+#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
+#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
+#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
+#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
+#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
+#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
+#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
+#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
+#define REG_DWCEQOS_MAC_Q0_TX_FLOW      0x0070
+
+#define DWCEQOS_MAC_CFG_ACS              BIT(20)
+#define DWCEQOS_MAC_CFG_JD               BIT(17)
+#define DWCEQOS_MAC_CFG_JE               BIT(16)
+#define DWCEQOS_MAC_CFG_PS               BIT(15)
+#define DWCEQOS_MAC_CFG_FES              BIT(14)
+#define DWCEQOS_MAC_CFG_DM               BIT(13)
+#define DWCEQOS_MAC_CFG_DO               BIT(10)
+#define DWCEQOS_MAC_CFG_TE               BIT(1)
+#define DWCEQOS_MAC_CFG_IPC              BIT(27)
+#define DWCEQOS_MAC_CFG_RE               BIT(0)
+
+#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + ((reg) * 8))
+#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + ((reg) * 8))
+
+#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
+#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
+
+#define DWCEQOS_MAC_RXQ_EN               BIT(1)
+#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
+#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
+#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
+#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
+#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
+#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
+#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
+#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
+#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
+#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
+
+#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x) & 15) << 8)
+#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
+#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
+#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
+#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
+#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
+#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
+#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
+#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
+
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
+#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
+
+#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
+
+#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
+                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
+                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
+
+#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
+
+#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)        ((time) << 16)
+#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
+
+/* Features */
+#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
+#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
+#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
+#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
+#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
+#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
+
+#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
+#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
+#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
+
+#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
+       (1 + (((feature1) & 0x1fc0000) >> 18))
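+/* Illustrative example: a feature1 value with bits 24:18 equal to 3 yields
+ * DWCEQOS_MAX_PERFECT_ADDRESSES() == 4.
+ */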
+
+#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
+#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
+
+#define DWCEQOS_DMA_MODE_SWR            BIT(0)
+
+#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
+
+/* Mac Management Counters */
+#define REG_DWCEQOS_MMC_CTRL             0x0700
+#define REG_DWCEQOS_MMC_RXIRQ            0x0704
+#define REG_DWCEQOS_MMC_TXIRQ            0x0708
+#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
+#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
+
+#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
+#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
+
+#define DWC_MMC_TXLPITRANSCNTR           0x07F0
+#define DWC_MMC_TXLPIUSCNTR              0x07EC
+#define DWC_MMC_TXOVERSIZE_G             0x0778
+#define DWC_MMC_TXVLANPACKETS_G          0x0774
+#define DWC_MMC_TXPAUSEPACKETS           0x0770
+#define DWC_MMC_TXEXCESSDEF              0x076C
+#define DWC_MMC_TXPACKETCOUNT_G          0x0768
+#define DWC_MMC_TXOCTETCOUNT_G           0x0764
+#define DWC_MMC_TXCARRIERERROR           0x0760
+#define DWC_MMC_TXEXCESSCOL              0x075C
+#define DWC_MMC_TXLATECOL                0x0758
+#define DWC_MMC_TXDEFERRED               0x0754
+#define DWC_MMC_TXMULTICOL_G             0x0750
+#define DWC_MMC_TXSINGLECOL_G            0x074C
+#define DWC_MMC_TXUNDERFLOWERROR         0x0748
+#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
+#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
+#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
+#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
+#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
+#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
+#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
+#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
+#define DWC_MMC_TX64OCTETS_GB            0x0724
+#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
+#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
+#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
+#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
+
+#define DWC_MMC_RXLPITRANSCNTR           0x07F8
+#define DWC_MMC_RXLPIUSCNTR              0x07F4
+#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
+#define DWC_MMC_RXRCVERROR               0x07E0
+#define DWC_MMC_RXWATCHDOG               0x07DC
+#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
+#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
+#define DWC_MMC_RXPAUSEPACKETS           0x07D0
+#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
+#define DWC_MMC_RXLENGTHERROR            0x07C8
+#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
+#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
+#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
+#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
+#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
+#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
+#define DWC_MMC_RX64OCTETS_GB            0x07AC
+#define DWC_MMC_RXOVERSIZE_G             0x07A8
+#define DWC_MMC_RXUNDERSIZE_G            0x07A4
+#define DWC_MMC_RXJABBERERROR            0x07A0
+#define DWC_MMC_RXRUNTERROR              0x079C
+#define DWC_MMC_RXALIGNMENTERROR         0x0798
+#define DWC_MMC_RXCRCERROR               0x0794
+#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
+#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
+#define DWC_MMC_RXOCTETCOUNT_G           0x0788
+#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
+#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
+
+static int debug = 3;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
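+/* Usage sketch (module name taken from the Makefile above):
+ *	modprobe dwc_eth_qos debug=16
+ */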
+
+/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
+struct ring_desc {
+       struct sk_buff *skb;
+       dma_addr_t mapping;
+       size_t len;
+};
+
+/* DMA hardware descriptor */
+struct dwceqos_dma_desc {
+       u32     des0;
+       u32     des1;
+       u32     des2;
+       u32     des3;
+} ____cacheline_aligned;
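+
+/* Illustrative only (not lifted verbatim from this driver): a single-buffer
+ * TX descriptor handed to the DMA would be filled in roughly as
+ *
+ *	dd->des0 = lower_32_bits(dma_addr);
+ *	dd->des2 = DWCEQOS_DMA_TDES2_IOC | DWCEQOS_DMA_DES2_B2L(len);
+ *	dd->des3 = DWCEQOS_DMA_TDES3_OWN | DWCEQOS_DMA_TDES3_FD |
+ *		   DWCEQOS_DMA_TDES3_LD | len;
+ */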
+
+struct dwceqos_mmc_counters {
+       __u64 txlpitranscntr;
+       __u64 txpiuscntr;
+       __u64 txoversize_g;
+       __u64 txvlanpackets_g;
+       __u64 txpausepackets;
+       __u64 txexcessdef;
+       __u64 txpacketcount_g;
+       __u64 txoctetcount_g;
+       __u64 txcarriererror;
+       __u64 txexcesscol;
+       __u64 txlatecol;
+       __u64 txdeferred;
+       __u64 txmulticol_g;
+       __u64 txsinglecol_g;
+       __u64 txunderflowerror;
+       __u64 txbroadcastpackets_gb;
+       __u64 txmulticastpackets_gb;
+       __u64 txunicastpackets_gb;
+       __u64 tx1024tomaxoctets_gb;
+       __u64 tx512to1023octets_gb;
+       __u64 tx256to511octets_gb;
+       __u64 tx128to255octets_gb;
+       __u64 tx65to127octets_gb;
+       __u64 tx64octets_gb;
+       __u64 txmulticastpackets_g;
+       __u64 txbroadcastpackets_g;
+       __u64 txpacketcount_gb;
+       __u64 txoctetcount_gb;
+
+       __u64 rxlpitranscntr;
+       __u64 rxlpiuscntr;
+       __u64 rxctrlpackets_g;
+       __u64 rxrcverror;
+       __u64 rxwatchdog;
+       __u64 rxvlanpackets_gb;
+       __u64 rxfifooverflow;
+       __u64 rxpausepackets;
+       __u64 rxoutofrangetype;
+       __u64 rxlengtherror;
+       __u64 rxunicastpackets_g;
+       __u64 rx1024tomaxoctets_gb;
+       __u64 rx512to1023octets_gb;
+       __u64 rx256to511octets_gb;
+       __u64 rx128to255octets_gb;
+       __u64 rx65to127octets_gb;
+       __u64 rx64octets_gb;
+       __u64 rxoversize_g;
+       __u64 rxundersize_g;
+       __u64 rxjabbererror;
+       __u64 rxrunterror;
+       __u64 rxalignmenterror;
+       __u64 rxcrcerror;
+       __u64 rxmulticastpackets_g;
+       __u64 rxbroadcastpackets_g;
+       __u64 rxoctetcount_g;
+       __u64 rxoctetcount_gb;
+       __u64 rxpacketcount_gb;
+};
+
+/* Ethtool statistics */
+
+struct dwceqos_stat {
+       const char stat_name[ETH_GSTRING_LEN];
+       int   offset;
+};
+
+#define STAT_ITEM(name, var) \
+       {\
+               name,\
+               offsetof(struct dwceqos_mmc_counters, var),\
+       }
+
+static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
+       STAT_ITEM("tx_bytes", txoctetcount_gb),
+       STAT_ITEM("tx_packets", txpacketcount_gb),
+       STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
+       STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
+       STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
+       STAT_ITEM("tx_pause_packets", txpausepackets),
+       STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
+       STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
+       STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
+       STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
+       STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+       STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
+       STAT_ITEM("tx_underflow_errors", txunderflowerror),
+       STAT_ITEM("tx_lpi_count", txlpitranscntr),
+
+       STAT_ITEM("rx_bytes", rxoctetcount_gb),
+       STAT_ITEM("rx_packets", rxpacketcount_gb),
+       STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
+       STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
+       STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
+       STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
+       STAT_ITEM("rx_pause_packets", rxpausepackets),
+       STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
+       STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
+       STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
+       STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
+       STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+       STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
+       STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
+       STAT_ITEM("rx_oversize_packets", rxoversize_g),
+       STAT_ITEM("rx_undersize_packets", rxundersize_g),
+       STAT_ITEM("rx_jabbers", rxjabbererror),
+       STAT_ITEM("rx_align_errors", rxalignmenterror),
+       STAT_ITEM("rx_crc_errors", rxcrcerror),
+       STAT_ITEM("rx_lpi_count", rxlpitranscntr),
+};
+
+/* Configuration of AXI bus parameters.
+ * These values depend on the parameters set on the MAC core as well
+ * as the AXI interconnect.
+ */
+struct dwceqos_bus_cfg {
+       /* Enable AXI low-power interface. */
+       bool en_lpi;
+       /* Limit on number of outstanding AXI write requests. */
+       u32 write_requests;
+       /* Limit on number of outstanding AXI read requests. */
+       u32 read_requests;
+       /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
+       u32 burst_map;
+       /* DMA Programmable burst length*/
+       u32 tx_pbl;
+       u32 rx_pbl;
+};
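+
+/* Illustrative example: burst_map = 0x7 permits 4, 8 and 16 beat bursts,
+ * which corresponds to DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT above.
+ */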
+
+struct dwceqos_flowcontrol {
+       int autoneg;
+       int rx;
+       int rx_current;
+       int tx;
+       int tx_current;
+};
+
+struct net_local {
+       void __iomem *baseaddr;
+       struct clk *phy_ref_clk;
+       struct clk *apb_pclk;
+
+       struct device_node *phy_node;
+       struct net_device *ndev;
+       struct platform_device *pdev;
+
+       u32 msg_enable;
+
+       struct tasklet_struct tx_bdreclaim_tasklet;
+       struct workqueue_struct *txtimeout_handler_wq;
+       struct work_struct txtimeout_reinit;
+
+       phy_interface_t phy_interface;
+       struct phy_device *phy_dev;
+       struct mii_bus *mii_bus;
+
+       unsigned int link;
+       unsigned int speed;
+       unsigned int duplex;
+
+       struct napi_struct napi;
+
+       /* DMA Descriptor Areas */
+       struct ring_desc *rx_skb;
+       struct ring_desc *tx_skb;
+
+       struct dwceqos_dma_desc *tx_descs;
+       struct dwceqos_dma_desc *rx_descs;
+
+       /* DMA Mapped Descriptor areas */
+       dma_addr_t tx_descs_addr;
+       dma_addr_t rx_descs_addr;
+       dma_addr_t tx_descs_tail_addr;
+       dma_addr_t rx_descs_tail_addr;
+
+       size_t tx_free;
+       size_t tx_next;
+       size_t rx_cur;
+       size_t tx_cur;
+
+       /* Spinlock protecting the TX DMA descriptor ring. */
+       spinlock_t tx_lock;
+
+       /* Spinlock for register read-modify-writes. */
+       spinlock_t hw_lock;
+
+       u32 feature0;
+       u32 feature1;
+       u32 feature2;
+
+       struct dwceqos_bus_cfg bus_cfg;
+       bool en_tx_lpi_clockgating;
+
+       int eee_enabled;
+       int eee_active;
+       int csr_val;
+       u32 gso_size;
+
+       struct dwceqos_mmc_counters mmc_counters;
+       /* Protect the mmc_counter updates. */
+       spinlock_t stats_lock;
+       u32 mmc_rx_counters_mask;
+       u32 mmc_tx_counters_mask;
+
+       struct dwceqos_flowcontrol flowcontrol;
+};
+
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+                                     u32 tx_mask);
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+                                 unsigned int reg_n);
+static int dwceqos_stop(struct net_device *ndev);
+static int dwceqos_open(struct net_device *ndev);
+static void dwceqos_tx_poll_demand(struct net_local *lp);
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
+
+static void dwceqos_reset_state(struct net_local *lp);
+
+#define dwceqos_read(lp, reg)                                          \
+       readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
+#define dwceqos_write(lp, reg, val)                                    \
+       writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
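+
+/* The accessors above use relaxed MMIO. A read-modify-write such as
+ *
+ *	u32 v = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+ *	dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, v | DWCEQOS_MAC_CFG_TE);
+ *
+ * is expected to run under lp->hw_lock (see struct net_local above).
+ */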
+
+static void dwceqos_reset_state(struct net_local *lp)
+{
+       lp->link    = 0;
+       lp->speed   = 0;
+       lp->duplex  = DUPLEX_UNKNOWN;
+       lp->flowcontrol.rx_current = 0;
+       lp->flowcontrol.tx_current = 0;
+       lp->eee_active = 0;
+       lp->eee_enabled = 0;
+}
+
+static void print_descriptor(struct net_local *lp, int index, int tx)
+{
+       struct dwceqos_dma_desc *dd;
+
+       if (tx)
+               dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
+       else
+               dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
+
+       pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
+               index, dd);
+       pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
+               dd->des3);
+}
+
+static void print_status(struct net_local *lp)
+{
+       size_t desci, i;
+
+       pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
+               lp->tx_cur, lp->tx_next);
+
+       print_descriptor(lp, lp->rx_cur, 0);
+
+       for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
+                i < DWCEQOS_TX_DCNT;
+                ++i) {
+               print_descriptor(lp, desci, 1);
+               desci = (desci + 1) % DWCEQOS_TX_DCNT;
+       }
+
+       pr_info("DMA_Debug_Status0:          0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
+       pr_info("DMA_CH0_Status:             0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
+       pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
+               dwceqos_read(lp, 0x1144));
+       pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
+               dwceqos_read(lp, 0x1154));
+       pr_info("MTL_Debug_Status:      0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
+       pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
+       pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
+       pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
+               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
+               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
+}
+
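+/* Pick the MDIO clock-range (CSR) divider from the APB clock rate so that
+ * the generated MDC frequency stays within the range each CR_* setting
+ * covers.
+ */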
+static void dwceqos_mdio_set_csr(struct net_local *lp)
+{
+       int rate = clk_get_rate(lp->apb_pclk);
+
+       if (rate <= 20000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
+       else if (rate <= 35000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
+       else if (rate <= 60000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
+       else if (rate <= 100000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
+       else if (rate <= 150000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
+       else if (rate <= 250000000)
+               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
+}
+
+/* Simple MDIO functions implementing mii_bus */
+static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
+{
+       struct net_local *lp = bus->priv;
+       u32 regval;
+       int i;
+       int data;
+
+       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+               DWCEQOS_MDIO_PHYREG(phyreg) |
+               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+               DWCEQOS_MAC_MDIO_ADDR_GB |
+               DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
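+       /* Wait for the GB (transaction in progress) flag to clear; five
+        * 64-128 us polls bound the wait to well under a millisecond.
+        */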
+       for (i = 0; i < 5; ++i) {
+               usleep_range(64, 128);
+               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+                     DWCEQOS_MAC_MDIO_ADDR_GB))
+                       break;
+       }
+
+       data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
+       if (i == 5) {
+               netdev_warn(lp->ndev, "MDIO read timed out\n");
+               data = 0xffff;
+       }
+
+       return data & 0xffff;
+}
+
+static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
+                             u16 value)
+{
+       struct net_local *lp = bus->priv;
+       u32 regval;
+       int i;
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
+
+       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
+               DWCEQOS_MDIO_PHYREG(phyreg) |
+               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
+               DWCEQOS_MAC_MDIO_ADDR_GB |
+               DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
+
+       for (i = 0; i < 5; ++i) {
+               usleep_range(64, 128);
+               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
+                     DWCEQOS_MAC_MDIO_ADDR_GB))
+                       break;
+       }
+       if (i == 5)
+               netdev_warn(lp->ndev, "MDIO write timed out\n");
+       return 0;
+}
+
+static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!netif_running(ndev))
+               return -EINVAL;
+
+       if (!phydev)
+               return -ENODEV;
+
+       switch (cmd) {
+       case SIOCGMIIPHY:
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               return phy_mii_ioctl(phydev, rq, cmd);
+       default:
+               dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
+               return -EOPNOTSUPP;
+       }
+}
+
+static void dwceqos_link_down(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       /* Indicate link down to the LPI state machine */
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_link_up(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       /* Indicate link up to the LPI state machine */
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+
+       lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+
+       /* Check for changed EEE capability */
+       if (!lp->eee_active && lp->eee_enabled) {
+               lp->eee_enabled = 0;
+
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       }
+}
+
+static void dwceqos_set_speed(struct net_local *lp)
+{
+       struct phy_device *phydev = lp->phy_dev;
+       u32 regval;
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
+                   DWCEQOS_MAC_CFG_DM);
+
+       if (phydev->duplex)
+               regval |= DWCEQOS_MAC_CFG_DM;
+       if (phydev->speed == SPEED_10) {
+               regval |= DWCEQOS_MAC_CFG_PS;
+       } else if (phydev->speed == SPEED_100) {
+               regval |= DWCEQOS_MAC_CFG_PS |
+                       DWCEQOS_MAC_CFG_FES;
+       } else if (phydev->speed != SPEED_1000) {
+               netdev_err(lp->ndev,
+                          "unknown PHY speed %d\n",
+                          phydev->speed);
+               return;
+       }
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
+}
+
+static void dwceqos_adjust_link(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+       int status_change = 0;
+
+       if (phydev->link) {
+               if ((lp->speed != phydev->speed) ||
+                   (lp->duplex != phydev->duplex)) {
+                       dwceqos_set_speed(lp);
+
+                       lp->speed = phydev->speed;
+                       lp->duplex = phydev->duplex;
+                       status_change = 1;
+               }
+
+               if (lp->flowcontrol.autoneg) {
+                       lp->flowcontrol.rx = phydev->pause ||
+                                            phydev->asym_pause;
+                       lp->flowcontrol.tx = phydev->pause ||
+                                            phydev->asym_pause;
+               }
+
+               if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
+                       if (netif_msg_link(lp))
+                               netdev_dbg(ndev, "set rx flow to %d\n",
+                                          lp->flowcontrol.rx);
+                       dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
+                       lp->flowcontrol.rx_current = lp->flowcontrol.rx;
+               }
+               if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
+                       if (netif_msg_link(lp))
+                               netdev_dbg(ndev, "set tx flow to %d\n",
+                                          lp->flowcontrol.tx);
+                       dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
+                       lp->flowcontrol.tx_current = lp->flowcontrol.tx;
+               }
+       }
+
+       if (phydev->link != lp->link) {
+               lp->link = phydev->link;
+               status_change = 1;
+       }
+
+       if (status_change) {
+               if (phydev->link) {
+                       lp->ndev->trans_start = jiffies;
+                       dwceqos_link_up(lp);
+               } else {
+                       dwceqos_link_down(lp);
+               }
+               phy_print_status(phydev);
+       }
+}
+
+static int dwceqos_mii_probe(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = NULL;
+
+       if (lp->phy_node) {
+               phydev = of_phy_connect(lp->ndev,
+                                       lp->phy_node,
+                                       &dwceqos_adjust_link,
+                                       0,
+                                       lp->phy_interface);
+
+               if (!phydev) {
+                       netdev_err(ndev, "no PHY found\n");
+                       return -ENODEV;
+               }
+       } else {
+               netdev_err(ndev, "no PHY configured\n");
+               return -ENODEV;
+       }
+
+       if (netif_msg_probe(lp))
+               netdev_dbg(lp->ndev,
+                          "phydev %p, phydev->phy_id 0x%x, phydev->addr 0x%x\n",
+                          phydev, phydev->phy_id, phydev->addr);
+
+       phydev->supported &= PHY_GBIT_FEATURES;
+
+       lp->link    = 0;
+       lp->speed   = 0;
+       lp->duplex  = DUPLEX_UNKNOWN;
+       lp->phy_dev = phydev;
+
+       if (netif_msg_probe(lp)) {
+               netdev_dbg(lp->ndev, "phy_addr 0x%x, phy_id 0x%08x\n",
+                          lp->phy_dev->addr, lp->phy_dev->phy_id);
+
+               netdev_dbg(lp->ndev, "attach [%s] phy driver\n",
+                          lp->phy_dev->drv->name);
+       }
+
+       return 0;
+}
+
+static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
+{
+       struct sk_buff *new_skb;
+       dma_addr_t new_skb_baddr = 0;
+
+       new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+       if (!new_skb) {
+               netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
+               goto err_out;
+       }
+
+       new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
+                                      new_skb->data, DWCEQOS_RX_BUF_SIZE,
+                                      DMA_FROM_DEVICE);
+       if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+               netdev_err(lp->ndev, "DMA map error\n");
+               dev_kfree_skb(new_skb);
+               new_skb = NULL;
+               goto err_out;
+       }
+
+       lp->rx_descs[index].des0 = new_skb_baddr;
+       lp->rx_descs[index].des1 = 0;
+       lp->rx_descs[index].des2 = 0;
+       lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
+                                  DWCEQOS_DMA_RDES3_BUF1V |
+                                  DWCEQOS_DMA_RDES3_OWN;
+
+       lp->rx_skb[index].mapping = new_skb_baddr;
+       lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
+
+err_out:
+       lp->rx_skb[index].skb = new_skb;
+}
+
+static void dwceqos_clean_rings(struct net_local *lp)
+{
+       int i;
+
+       if (lp->rx_skb) {
+               for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
+                       if (lp->rx_skb[i].skb) {
+                               dma_unmap_single(lp->ndev->dev.parent,
+                                                lp->rx_skb[i].mapping,
+                                                lp->rx_skb[i].len,
+                                                DMA_FROM_DEVICE);
+
+                               dev_kfree_skb(lp->rx_skb[i].skb);
+                               lp->rx_skb[i].skb = NULL;
+                               lp->rx_skb[i].mapping = 0;
+                       }
+               }
+       }
+
+       if (lp->tx_skb) {
+               for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
+                       if (lp->tx_skb[i].skb) {
+                               dev_kfree_skb(lp->tx_skb[i].skb);
+                               lp->tx_skb[i].skb = NULL;
+                       }
+                       if (lp->tx_skb[i].mapping) {
+                               dma_unmap_single(lp->ndev->dev.parent,
+                                                lp->tx_skb[i].mapping,
+                                                lp->tx_skb[i].len,
+                                                DMA_TO_DEVICE);
+                               lp->tx_skb[i].mapping = 0;
+                       }
+               }
+       }
+}
+
+static void dwceqos_descriptor_free(struct net_local *lp)
+{
+       int size;
+
+       dwceqos_clean_rings(lp);
+
+       kfree(lp->tx_skb);
+       lp->tx_skb = NULL;
+       kfree(lp->rx_skb);
+       lp->rx_skb = NULL;
+
+       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+       if (lp->rx_descs) {
+               dma_free_coherent(lp->ndev->dev.parent, size,
+                                 (void *)(lp->rx_descs), lp->rx_descs_addr);
+               lp->rx_descs = NULL;
+       }
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+       if (lp->tx_descs) {
+               dma_free_coherent(lp->ndev->dev.parent, size,
+                                 (void *)(lp->tx_descs), lp->tx_descs_addr);
+               lp->tx_descs = NULL;
+       }
+}
+
+static int dwceqos_descriptor_init(struct net_local *lp)
+{
+       int size;
+       u32 i;
+
+       lp->gso_size = 0;
+
+       lp->tx_skb = NULL;
+       lp->rx_skb = NULL;
+       lp->rx_descs = NULL;
+       lp->tx_descs = NULL;
+
+       /* Reset the DMA indexes */
+       lp->rx_cur = 0;
+       lp->tx_cur = 0;
+       lp->tx_next = 0;
+       lp->tx_free = DWCEQOS_TX_DCNT;
+
+       /* Allocate Ring descriptors */
+       size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
+       lp->rx_skb = kzalloc(size, GFP_KERNEL);
+       if (!lp->rx_skb)
+               goto err_out;
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
+       lp->tx_skb = kzalloc(size, GFP_KERNEL);
+       if (!lp->tx_skb)
+               goto err_out;
+
+       /* Allocate DMA descriptors */
+       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
+       lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+                       &lp->rx_descs_addr, 0);
+       if (!lp->rx_descs)
+               goto err_out;
+       lp->rx_descs_tail_addr = lp->rx_descs_addr +
+               sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
+
+       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
+       lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
+                       &lp->tx_descs_addr, 0);
+       if (!lp->tx_descs)
+               goto err_out;
+       lp->tx_descs_tail_addr = lp->tx_descs_addr +
+               sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
+
+       /* Initialize RX Ring Descriptors and buffers */
+       for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
+               dwceqos_alloc_rxring_desc(lp, i);
+               if (!(lp->rx_skb[i].skb))
+                       goto err_out;
+       }
+
+       /* Initialize TX Descriptors */
+       for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
+               lp->tx_descs[i].des0 = 0;
+               lp->tx_descs[i].des1 = 0;
+               lp->tx_descs[i].des2 = 0;
+               lp->tx_descs[i].des3 = 0;
+       }
+
+       /* Make descriptor writes visible to the DMA. */
+       wmb();
+
+       return 0;
+
+err_out:
+       dwceqos_descriptor_free(lp);
+       return -ENOMEM;
+}
+
+static int dwceqos_packet_avail(struct net_local *lp)
+{
+       return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
+}
+
+static void dwceqos_get_hwfeatures(struct net_local *lp)
+{
+       lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
+       lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
+       lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
+}
+
+static void dwceqos_dma_enable_txirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval |= DWCEQOS_DMA_CH0_IE_TIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_txirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_enable_rxirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval |= DWCEQOS_DMA_CH0_IE_RIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_dma_disable_rxirq(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
+       regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
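+/* Writing zero to the MMC interrupt mask registers unmasks all counter
+ * interrupts.
+ */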
+static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
+{
+       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
+       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
+}
+
+static int dwceqos_mii_init(struct net_local *lp)
+{
+       int ret = -ENXIO, i;
+       struct resource res;
+       struct device_node *mdionode;
+
+       mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
+
+       if (!mdionode)
+               return 0;
+
+       lp->mii_bus = mdiobus_alloc();
+       if (!lp->mii_bus) {
+               ret = -ENOMEM;
+               goto err_out;
+       }
+
+       lp->mii_bus->name  = "DWCEQOS MII bus";
+       lp->mii_bus->read  = &dwceqos_mdio_read;
+       lp->mii_bus->write = &dwceqos_mdio_write;
+       lp->mii_bus->priv = lp;
+       lp->mii_bus->parent = &lp->ndev->dev;
+
+       lp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       if (!lp->mii_bus->irq) {
+               ret = -ENOMEM;
+               goto err_out_free_mdiobus;
+       }
+
+       for (i = 0; i < PHY_MAX_ADDR; i++)
+               lp->mii_bus->irq[i] = PHY_POLL;
+       of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
+       snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
+                (unsigned long long)res.start);
+       if (of_mdiobus_register(lp->mii_bus, mdionode))
+               goto err_out_free_mdio_irq;
+
+       of_node_put(mdionode);
+       return 0;
+
+err_out_free_mdio_irq:
+       kfree(lp->mii_bus->irq);
+err_out_free_mdiobus:
+       mdiobus_free(lp->mii_bus);
+err_out:
+       of_node_put(mdionode);
+       return ret;
+}
+
+/* DMA reset. When issued, it also resets all MTL and MAC registers. */
+static void dwceqos_reset_hw(struct net_local *lp)
+{
+       /* Wait (at most) 0.5 seconds for DMA reset */
+       int i = 5000;
+       u32 reg;
+
+       /* Force gigabit to guarantee a TX clock for GMII. */
+       reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
+       reg |= DWCEQOS_MAC_CFG_DM;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
+
+       do {
+               udelay(100);
+               i--;
+               reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
+       } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
+       /* We might experience a timeout if the chip clock mux is broken */
+       if (!i)
+               netdev_err(lp->ndev, "DMA reset timed out!\n");
+}
+
+static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
+{
+       if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
+               netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
+                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
+                               "read" : "write",
+                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
+                               "descr" : "data",
+                          dma_status);
+
+               print_status(lp);
+       }
+       if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
+               netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
+                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
+                               "read" : "write",
+                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
+                               "descr" : "data",
+                          dma_status);
+
+               print_status(lp);
+       }
+}
+
+static void dwceqos_mmc_interrupt(struct net_local *lp)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+
+       /* A latched MMC interrupt cannot be masked; we must read
+        * all the counters while an interrupt is pending.
+        */
+       dwceqos_read_mmc_counters(lp,
+                                 dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
+                                 dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
+
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+}
+
+static void dwceqos_mac_interrupt(struct net_local *lp)
+{
+       u32 cause;
+
+       cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
+
+       if (cause & DWCEQOS_MAC_IS_MMC_INT)
+               dwceqos_mmc_interrupt(lp);
+}
+
+static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
+{
+       struct net_device *ndev = dev_id;
+       struct net_local *lp = netdev_priv(ndev);
+
+       u32 cause;
+       u32 dma_status;
+       irqreturn_t ret = IRQ_NONE;
+
+       cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
+       /* DMA Channel 0 Interrupt */
+       if (cause & DWCEQOS_DMA_IS_DC0IS) {
+               dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
+
+               /* Transmit Interrupt */
+               if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
+                       tasklet_schedule(&lp->tx_bdreclaim_tasklet);
+                       dwceqos_dma_disable_txirq(lp);
+               }
+
+               /* Receive Interrupt */
+               if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
+                       /* Disable RX IRQs */
+                       dwceqos_dma_disable_rxirq(lp);
+                       napi_schedule(&lp->napi);
+               }
+
+               /* Fatal Bus Error interrupt */
+               if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
+                       dwceqos_fatal_bus_error(lp, dma_status);
+
+                       /* errata 9000831707 */
+                       dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
+                                     DWCEQOS_DMA_CH0_IS_REB;
+               }
+
+               /* Ack all DMA Channel 0 IRQs */
+               dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
+               ret = IRQ_HANDLED;
+       }
+
+       if (cause & DWCEQOS_DMA_IS_MTLIS) {
+               u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
+
+               dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
+               ret = IRQ_HANDLED;
+       }
+
+       if (cause & DWCEQOS_DMA_IS_MACIS) {
+               dwceqos_mac_interrupt(lp);
+               ret = IRQ_HANDLED;
+       }
+       return ret;
+}
+
+static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
+       if (enable)
+               regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+       else
+               regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
+{
+       u32 regval;
+       unsigned long flags;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       /* MTL flow control */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+       if (enable)
+               regval |= DWCEQOS_MTL_RXQ_EHFC;
+       else
+               regval &= ~DWCEQOS_MTL_RXQ_EHFC;
+
+       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+       /* MAC flow control */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
+       if (enable)
+               regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+       else
+               regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
+       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_flow_control(struct net_local *lp)
+{
+       u32 regval;
+       unsigned long flags;
+       int RQS, RFD, RFA;
+
+       spin_lock_irqsave(&lp->hw_lock, flags);
+
+       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
+
+       /* The queue size is in units of 256 bytes. We want 512 bytes units for
+        * the threshold fields.
+        */
+       RQS = ((regval >> 20) & 0x3FF) + 1;
+       RQS /= 2;
+
+       /* The thresholds are relative to a full queue, with a bias
+        * of 1 KiByte below full.
+        */
+       RFD = RQS / 2 - 2;
+       RFA = RQS / 8 - 2;
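+       /* For example, assuming the size field encodes (bytes / 256) - 1 as
+        * the +1 above suggests, an 8 KiB RX FIFO gives RQS = 32 -> 16
+        * (512-byte units), so RFD = 6 and RFA = 0; a 4 KiB FIFO gives
+        * RFA = -1 and takes the "too small" branch below.
+        */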
+
+       regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
+
+       if (RFD >= 0 && RFA >= 0) {
+               dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+       } else {
+               netdev_warn(lp->ndev,
+                           "FIFO too small for flow control.\n");
+       }
+
+       regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
+                DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
+
+       spin_unlock_irqrestore(&lp->hw_lock, flags);
+}
+
+static void dwceqos_configure_clock(struct net_local *lp)
+{
+       unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
+
+       BUG_ON(!rate_mhz);
+
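+       /* The value written is rate_mhz - 1, e.g. a 125 MHz pclk is
+        * programmed as 124.
+        */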
+       dwceqos_write(lp,
+                     REG_DWCEQOS_MAC_1US_TIC_COUNTER,
+                     DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
+}
+
+static void dwceqos_configure_bus(struct net_local *lp)
+{
+       u32 sysbus_reg;
+
+       /* N.B. We do not support the Fixed Burst mode because it
+        * opens a race window by making HW access to DMA descriptors
+        * non-atomic.
+        */
+
+       sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
+
+       if (lp->bus_cfg.en_lpi)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
+
+       if (lp->bus_cfg.burst_map)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+                       lp->bus_cfg.burst_map);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
+                       DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
+
+       if (lp->bus_cfg.read_requests)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+                       lp->bus_cfg.read_requests - 1);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
+                       DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
+
+       if (lp->bus_cfg.write_requests)
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+                       lp->bus_cfg.write_requests - 1);
+       else
+               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
+                       DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
+
+       if (netif_msg_hw(lp))
+               netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
+}
+
+static void dwceqos_init_hw(struct net_local *lp)
+{
+       u32 regval;
+       u32 buswidth;
+       u32 dma_skip;
+
+       /* Software reset */
+       dwceqos_reset_hw(lp);
+
+       dwceqos_configure_bus(lp);
+
+       /* Probe data bus width, 32/64/128 bits. */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
+       buswidth = (regval ^ 0xF) + 1;
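+       /* The probe relies on the tail-pointer register forcing the address
+        * bits below the bus width to zero: on a 64-bit bus 0xF reads back
+        * as 0x8, and (0x8 ^ 0xF) + 1 = 8 bytes.
+        */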
+
+       /* Cache-align dma descriptors. */
+       dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
+                     DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
+                     DWCEQOS_DMA_CH_CTRL_PBLX8);
+
+       /* Initialize DMA Channel 0 */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
+                     (u32)lp->tx_descs_addr);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
+                     (u32)lp->rx_descs_addr);
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+                     lp->tx_descs_tail_addr);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+                     lp->rx_descs_tail_addr);
+
+       if (lp->bus_cfg.tx_pbl)
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
+       else
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+       /* Enable TSO if the HW supports it */
+       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+               regval |= DWCEQOS_DMA_CH_TX_TSE;
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
+
+       if (lp->bus_cfg.rx_pbl)
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
+       else
+               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
+
+       regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+       regval |= DWCEQOS_DMA_CH_CTRL_START;
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
+
+       /* Initialize MTL Queues */
+       regval = DWCEQOS_MTL_SCHALG_STRICT;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
+
+       regval = DWCEQOS_MTL_TXQ_SIZE(
+                       DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
+               DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
+               DWCEQOS_MTL_TXQ_TTC512;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
+
+       regval = DWCEQOS_MTL_RXQ_SIZE(
+                       DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
+               DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
+       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
+
+       dwceqos_configure_flow_control(lp);
+
+       /* Initialize MAC */
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+       lp->eee_enabled = 0;
+
+       dwceqos_configure_clock(lp);
+
+       /* MMC counters */
+
+       /* probe implemented counters */
+       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
+       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
+       lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
+       lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
+
+       dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
+               DWCEQOS_MMC_CTRL_RSTONRD);
+       dwceqos_enable_mmc_interrupt(lp);
+
+       /* Enable Interrupts */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+                     DWCEQOS_DMA_CH0_IE_NIE |
+                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+                     DWCEQOS_DMA_CH0_IE_AIE |
+                     DWCEQOS_DMA_CH0_IE_FBEE);
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
+
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
+               DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+
+       /* Start TX DMA */
+       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
+                     regval | DWCEQOS_DMA_CH_CTRL_START);
+
+       /* Enable MAC TX/RX */
+       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
+       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
+                     regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
+}
+
+static void dwceqos_tx_reclaim(unsigned long data)
+{
+       struct net_device *ndev = (struct net_device *)data;
+       struct net_local *lp = netdev_priv(ndev);
+       unsigned int tx_bytes = 0;
+       unsigned int tx_packets = 0;
+
+       spin_lock(&lp->tx_lock);
+
+       while (lp->tx_free < DWCEQOS_TX_DCNT) {
+               struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
+               struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
+
+               /* Descriptor still being held by DMA? */
+               if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
+                       break;
+
+               if (rd->mapping)
+                       dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
+                                        DMA_TO_DEVICE);
+
+               if (unlikely(rd->skb)) {
+                       ++tx_packets;
+                       tx_bytes += rd->skb->len;
+                       dev_consume_skb_any(rd->skb);
+               }
+
+               rd->skb = NULL;
+               rd->mapping = 0;
+               lp->tx_free++;
+               lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
+
+               if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
+                   (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
+                       if (netif_msg_tx_err(lp))
+                               netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
+                                          dd->des3);
+                       if (netif_msg_hw(lp))
+                               print_status(lp);
+               }
+       }
+       spin_unlock(&lp->tx_lock);
+
+       netdev_completed_queue(ndev, tx_packets, tx_bytes);
+
+       dwceqos_dma_enable_txirq(lp);
+       netif_wake_queue(ndev);
+}
+
+static int dwceqos_rx(struct net_local *lp, int budget)
+{
+       struct sk_buff *skb;
+       u32 tot_size = 0;
+       unsigned int n_packets = 0;
+       unsigned int n_descs = 0;
+       u32 len;
+
+       struct dwceqos_dma_desc *dd;
+       struct sk_buff *new_skb;
+       dma_addr_t new_skb_baddr = 0;
+
+       while (n_descs < budget) {
+               if (!dwceqos_packet_avail(lp))
+                       break;
+
+               new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
+               if (!new_skb) {
+                       netdev_err(lp->ndev, "no memory for new sk_buff\n");
+                       break;
+               }
+
+               /* Get dma handle of skb->data */
+               new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
+                                       new_skb->data,
+                                       DWCEQOS_RX_BUF_SIZE,
+                                       DMA_FROM_DEVICE);
+               if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
+                       netdev_err(lp->ndev, "DMA map error\n");
+                       dev_kfree_skb(new_skb);
+                       break;
+               }
+
+               /* Read descriptor data after reading owner bit. */
+               dma_rmb();
+
+               dd = &lp->rx_descs[lp->rx_cur];
+               len = DWCEQOS_DMA_RDES3_PL(dd->des3);
+               skb = lp->rx_skb[lp->rx_cur].skb;
+
+               /* Unmap old buffer */
+               dma_unmap_single(lp->ndev->dev.parent,
+                                lp->rx_skb[lp->rx_cur].mapping,
+                                lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
+
+               /* Discard packet on reception error or bad checksum */
+               if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
+                   (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
+                       dev_kfree_skb(skb);
+                       skb = NULL;
+               } else {
+                       skb_put(skb, len);
+                       skb->protocol = eth_type_trans(skb, lp->ndev);
+                       switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
+                       case DWCEQOS_DMA_RDES1_PT_UDP:
+                       case DWCEQOS_DMA_RDES1_PT_TCP:
+                       case DWCEQOS_DMA_RDES1_PT_ICMP:
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+                               break;
+                       default:
+                               skb->ip_summed = CHECKSUM_NONE;
+                               break;
+                       }
+               }
+
+               if (unlikely(!skb)) {
+                       if (netif_msg_rx_err(lp))
+                               netdev_dbg(lp->ndev, "rx error: des3=%X\n",
+                                          lp->rx_descs[lp->rx_cur].des3);
+               } else {
+                       tot_size += skb->len;
+                       n_packets++;
+
+                       netif_receive_skb(skb);
+               }
+
+               lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
+               lp->rx_descs[lp->rx_cur].des1 = 0;
+               lp->rx_descs[lp->rx_cur].des2 = 0;
+               /* The DMA must observe des0/1/2 written before des3. */
+               wmb();
+               lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
+                                               DWCEQOS_DMA_RDES3_OWN  |
+                                               DWCEQOS_DMA_RDES3_BUF1V;
+
+               lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
+               lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
+               lp->rx_skb[lp->rx_cur].skb = new_skb;
+
+               n_descs++;
+               lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
+       }
+
+       /* Make sure any ownership update is written to the descriptors before
+        * DMA wakeup.
+        */
+       wmb();
+
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
+       /* Wake up RX by writing tail pointer */
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
+                     lp->rx_descs_tail_addr);
+
+       return n_descs;
+}
+
+static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
+{
+       struct net_local *lp = container_of(napi, struct net_local, napi);
+       int work_done = 0;
+
+       work_done = dwceqos_rx(lp, budget - work_done);
+
+       if (!dwceqos_packet_avail(lp) && work_done < budget) {
+               napi_complete(napi);
+               dwceqos_dma_enable_rxirq(lp);
+       } else {
+               work_done = budget;
+       }
+
+       return work_done;
+}
+
+/* Worker that reinitializes the hardware after a TX timeout */
+static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
+{
+       struct net_local *lp = container_of(data, struct net_local,
+               txtimeout_reinit);
+
+       netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
+                  DWCEQOS_TX_TIMEOUT);
+
+       if (netif_msg_hw(lp))
+               print_status(lp);
+
+       rtnl_lock();
+       dwceqos_stop(lp->ndev);
+       dwceqos_open(lp->ndev);
+       rtnl_unlock();
+}
+
+/* DT Probing function called by main probe */
+static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
+{
+       struct net_device *ndev;
+       struct net_local *lp;
+       const void *mac_address;
+       struct dwceqos_bus_cfg *bus_cfg;
+       struct device_node *np = pdev->dev.of_node;
+
+       ndev = platform_get_drvdata(pdev);
+       lp = netdev_priv(ndev);
+       bus_cfg = &lp->bus_cfg;
+
+       /* Set the MAC address. */
+       mac_address = of_get_mac_address(pdev->dev.of_node);
+       if (mac_address)
+               ether_addr_copy(ndev->dev_addr, mac_address);
+
+       /* These are all optional parameters */
+       lp->en_tx_lpi_clockgating =  of_property_read_bool(np,
+               "snps,en-tx-lpi-clockgating");
+       bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
+       of_property_read_u32(np, "snps,write-requests",
+                            &bus_cfg->write_requests);
+       of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
+       of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
+       of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
+       of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
+
+       netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%u\n",
+                  bus_cfg->en_lpi,
+                  bus_cfg->write_requests,
+                  bus_cfg->read_requests,
+                  bus_cfg->burst_map,
+                  bus_cfg->rx_pbl,
+                  bus_cfg->tx_pbl);
+
+       return 0;
+}
+
+static int dwceqos_open(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       int res;
+
+       dwceqos_reset_state(lp);
+       res = dwceqos_descriptor_init(lp);
+       if (res) {
+               netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
+               return res;
+       }
+       netdev_reset_queue(ndev);
+
+       napi_enable(&lp->napi);
+       phy_start(lp->phy_dev);
+       dwceqos_init_hw(lp);
+
+       netif_start_queue(ndev);
+       tasklet_enable(&lp->tx_bdreclaim_tasklet);
+
+       return 0;
+}
+
+static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
+{
+       u32 reg;
+
+       reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
+       reg = DMA_GET_TX_STATE_CH0(reg);
+
+       return reg == DMA_TX_CH_SUSPENDED;
+}
+
+static void dwceqos_drain_dma(struct net_local *lp)
+{
+       /* Wait for all pending TX buffers to be sent. Upper limit based
+        * on max frame size on a 10 Mbit link.
+        */
+       size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
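+       /* 1250 us is roughly the wire time of a full-sized frame at
+        * 10 Mbit/s, and each loop iteration below sleeps for at least
+        * 100 us, so the total wait is about DWCEQOS_TX_DCNT frame times.
+        */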
+
+       while (!dweqos_is_tx_dma_suspended(lp) && limit--)
+               usleep_range(100, 200);
+}
+
+static int dwceqos_stop(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       phy_stop(lp->phy_dev);
+
+       tasklet_disable(&lp->tx_bdreclaim_tasklet);
+       netif_stop_queue(ndev);
+       napi_disable(&lp->napi);
+
+       dwceqos_drain_dma(lp);
+
+       netif_tx_lock(lp->ndev);
+       dwceqos_reset_hw(lp);
+       dwceqos_descriptor_free(lp);
+       netif_tx_unlock(lp->ndev);
+
+       return 0;
+}
+
+static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
+                                   unsigned short gso_size)
+{
+       struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
+
+       dd->des0 = 0;
+       dd->des1 = 0;
+       dd->des2 = gso_size;
+       dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
+
+       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+}
+
+static void dwceqos_tx_poll_demand(struct net_local *lp)
+{
+       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
+                     lp->tx_descs_tail_addr);
+}
+
+struct dwceqos_tx {
+       size_t nr_descriptors;
+       size_t initial_descriptor;
+       size_t last_descriptor;
+       size_t prev_gso_size;
+       size_t network_header_len;
+};
+
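+/* Estimate how many DMA descriptors an skb will consume: one for the linear
+ * part, one extra context descriptor when the GSO size changes, and one per
+ * BYTES_PER_DMA_DESC chunk of every fragment. Also snapshot the ring state
+ * needed for a later rollback.
+ */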
+static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
+                              struct dwceqos_tx *tx)
+{
+       size_t n = 1;
+       size_t i;
+
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
+               ++n;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               n +=  (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
+                      BYTES_PER_DMA_DESC;
+       }
+
+       tx->nr_descriptors = n;
+       tx->initial_descriptor = lp->tx_next;
+       tx->last_descriptor = lp->tx_next;
+       tx->prev_gso_size = lp->gso_size;
+
+       tx->network_header_len = skb_transport_offset(skb);
+       if (skb_is_gso(skb))
+               tx->network_header_len += tcp_hdrlen(skb);
+}
+
+static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
+                            struct dwceqos_tx *tx)
+{
+       struct ring_desc *rd;
+       struct dwceqos_dma_desc *dd;
+       size_t payload_len;
+       dma_addr_t dma_handle;
+
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
+               dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
+               lp->gso_size = skb_shinfo(skb)->gso_size;
+       }
+
+       dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
+                                   skb_headlen(skb), DMA_TO_DEVICE);
+
+       if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+               netdev_err(lp->ndev, "TX DMA Mapping error\n");
+               return -ENOMEM;
+       }
+
+       rd = &lp->tx_skb[lp->tx_next];
+       dd = &lp->tx_descs[lp->tx_next];
+
+       rd->skb = NULL;
+       rd->len = skb_headlen(skb);
+       rd->mapping = dma_handle;
+
+       /* Set up DMA Descriptor */
+       dd->des0 = dma_handle;
+
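+       /* For TSO, buffer 1 (des0) holds the headers and buffer 2 (des1)
+        * points at the payload within the same mapping; des3 carries the
+        * TCP header length in 32-bit words and the total payload length.
+        */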
+       if (skb_is_gso(skb)) {
+               payload_len = skb_headlen(skb) - tx->network_header_len;
+
+               if (payload_len)
+                       dd->des1 = dma_handle + tx->network_header_len;
+               dd->des2 = tx->network_header_len |
+                       DWCEQOS_DMA_DES2_B2L(payload_len);
+               dd->des3 = DWCEQOS_DMA_TDES3_TSE |
+                       DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
+                       (skb->len - tx->network_header_len);
+       } else {
+               dd->des1 = 0;
+               dd->des2 = skb_headlen(skb);
+               dd->des3 = skb->len;
+
+               switch (skb->ip_summed) {
+               case CHECKSUM_PARTIAL:
+                       dd->des3 |= DWCEQOS_DMA_TDES3_CA;
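+                       /* fall through */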
+               case CHECKSUM_NONE:
+               case CHECKSUM_UNNECESSARY:
+               case CHECKSUM_COMPLETE:
+               default:
+                       break;
+               }
+       }
+
+       dd->des3 |= DWCEQOS_DMA_TDES3_FD;
+       if (lp->tx_next  != tx->initial_descriptor)
+               dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+       tx->last_descriptor = lp->tx_next;
+       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+
+       return 0;
+}
+
+static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
+                           struct dwceqos_tx *tx)
+{
+       struct ring_desc *rd = NULL;
+       struct dwceqos_dma_desc *dd;
+       dma_addr_t dma_handle;
+       size_t i;
+
+       /* Setup more ring and DMA descriptor if the packet is fragmented */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               size_t frag_size;
+               size_t consumed_size;
+
+               /* Map DMA Area */
+               dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
+                                             skb_frag_size(frag),
+                                             DMA_TO_DEVICE);
+               if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
+                       netdev_err(lp->ndev, "DMA Mapping error\n");
+                       return -ENOMEM;
+               }
+
+               /* order-3 fragments span more than one descriptor. */
+               frag_size = skb_frag_size(frag);
+               consumed_size = 0;
+               while (consumed_size < frag_size) {
+                       size_t dma_size = min_t(size_t, 16376,
+                                               frag_size - consumed_size);
+
+                       rd = &lp->tx_skb[lp->tx_next];
+                       memset(rd, 0, sizeof(*rd));
+
+                       dd = &lp->tx_descs[lp->tx_next];
+
+                       /* Set DMA Descriptor fields */
+                       dd->des0 = dma_handle;
+                       dd->des1 = 0;
+                       dd->des2 = dma_size;
+
+                       if (skb_is_gso(skb))
+                               dd->des3 = (skb->len - tx->network_header_len);
+                       else
+                               dd->des3 = skb->len;
+
+                       dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+                       tx->last_descriptor = lp->tx_next;
+                       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
+                       consumed_size += dma_size;
+               }
+
+               rd->len = skb_frag_size(frag);
+               rd->mapping = dma_handle;
+       }
+
+       return 0;
+}
+
+static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
+                               struct dwceqos_tx *tx)
+{
+       lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
+       lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
+
+       lp->tx_skb[tx->last_descriptor].skb = skb;
+
+       /* Make all descriptor updates visible to the DMA before setting the
+        * owner bit.
+        */
+       wmb();
+
+       lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
+
+       /* Make the owner bit visible before TX wakeup. */
+       wmb();
+
+       dwceqos_tx_poll_demand(lp);
+}
+
+static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
+{
+       size_t i = tx->initial_descriptor;
+
+       while (i != lp->tx_next) {
+               if (lp->tx_skb[i].mapping)
+                       dma_unmap_single(lp->ndev->dev.parent,
+                                        lp->tx_skb[i].mapping,
+                                        lp->tx_skb[i].len,
+                                        DMA_TO_DEVICE);
+
+               lp->tx_skb[i].mapping = 0;
+               lp->tx_skb[i].skb = NULL;
+
+               memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
+
+               i = (i + 1) % DWCEQOS_TX_DCNT;
+       }
+
+       lp->tx_next = tx->initial_descriptor;
+       lp->gso_size = tx->prev_gso_size;
+}
+
+static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct dwceqos_tx trans;
+       int err;
+
+       dwceqos_tx_prepare(skb, lp, &trans);
+       if (lp->tx_free < trans.nr_descriptors) {
+               netif_stop_queue(ndev);
+               return NETDEV_TX_BUSY;
+       }
+
+       err = dwceqos_tx_linear(skb, lp, &trans);
+       if (err)
+               goto tx_error;
+
+       err = dwceqos_tx_frags(skb, lp, &trans);
+       if (err)
+               goto tx_error;
+
+       WARN_ON(lp->tx_next !=
+               ((trans.initial_descriptor + trans.nr_descriptors) %
+                DWCEQOS_TX_DCNT));
+
+       dwceqos_tx_finalize(skb, lp, &trans);
+
+       netdev_sent_queue(ndev, skb->len);
+
+       spin_lock_bh(&lp->tx_lock);
+       lp->tx_free -= trans.nr_descriptors;
+       spin_unlock_bh(&lp->tx_lock);
+
+       ndev->trans_start = jiffies;
+       return 0;
+
+tx_error:
+       dwceqos_tx_rollback(lp, &trans);
+       dev_kfree_skb(skb);
+       return 0;
+}
+
+/* Set MAC address and then update HW accordingly */
+static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct sockaddr *hwaddr = (struct sockaddr *)addr;
+
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(hwaddr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
+
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+       return 0;
+}
+
+static void dwceqos_tx_timeout(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
+}
+
+static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
+                                 unsigned int reg_n)
+{
+       unsigned long data;
+
+       data = (addr[5] << 8) | addr[4];
+       dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
+                     data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
+       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+       dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
+}
+
+static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
+{
+       /* Do not disable MAC address 0 */
+       if (reg_n != 0)
+               dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
+}
+
+static void dwceqos_set_rx_mode(struct net_device *ndev)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 regval = 0;
+       u32 mc_filter[2];
+       int reg = 1;
+       struct netdev_hw_addr *ha;
+       unsigned int max_mac_addr;
+
+       max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
+
+       if (ndev->flags & IFF_PROMISC) {
+               regval = DWCEQOS_MAC_PKT_FILT_PR;
+       } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
+                               (ndev->flags & IFF_ALLMULTI))) {
+               regval = DWCEQOS_MAC_PKT_FILT_PM;
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
+       } else if (!netdev_mc_empty(ndev)) {
+               regval = DWCEQOS_MAC_PKT_FILT_HMC;
+               memset(mc_filter, 0, sizeof(mc_filter));
+               netdev_for_each_mc_addr(ha, ndev) {
+                       /* The upper 6 bits of the calculated CRC are used to
+                        * index the contents of the hash table
+                        */
+                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+                       /* The most significant bit determines the register
+                        * to use (H/L) while the other 5 bits determine
+                        * the bit within the register.
+                        */
+                       mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+               }
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
+               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
+       }
+       if (netdev_uc_count(ndev) > max_mac_addr) {
+               regval |= DWCEQOS_MAC_PKT_FILT_PR;
+       } else {
+               netdev_for_each_uc_addr(ha, ndev) {
+                       dwceqos_set_umac_addr(lp, ha->addr, reg);
+                       reg++;
+               }
+               for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
+                       dwceqos_disable_umac_addr(lp, reg);
+       }
+       dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void dwceqos_poll_controller(struct net_device *ndev)
+{
+       disable_irq(ndev->irq);
+       dwceqos_interrupt(ndev->irq, ndev);
+       enable_irq(ndev->irq);
+}
+#endif
+
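+/* Accumulate the MMC counters selected by rx_mask/tx_mask into the software
+ * totals; the hardware counters are configured to reset on read (RSTONRD).
+ */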
+static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
+                                     u32 tx_mask)
+{
+       if (tx_mask & BIT(27))
+               lp->mmc_counters.txlpitranscntr +=
+                       dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
+       if (tx_mask & BIT(26))
+               lp->mmc_counters.txpiuscntr +=
+                       dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
+       if (tx_mask & BIT(25))
+               lp->mmc_counters.txoversize_g +=
+                       dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
+       if (tx_mask & BIT(24))
+               lp->mmc_counters.txvlanpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
+       if (tx_mask & BIT(23))
+               lp->mmc_counters.txpausepackets +=
+                       dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
+       if (tx_mask & BIT(22))
+               lp->mmc_counters.txexcessdef +=
+                       dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
+       if (tx_mask & BIT(21))
+               lp->mmc_counters.txpacketcount_g +=
+                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
+       if (tx_mask & BIT(20))
+               lp->mmc_counters.txoctetcount_g +=
+                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
+       if (tx_mask & BIT(19))
+               lp->mmc_counters.txcarriererror +=
+                       dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
+       if (tx_mask & BIT(18))
+               lp->mmc_counters.txexcesscol +=
+                       dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
+       if (tx_mask & BIT(17))
+               lp->mmc_counters.txlatecol +=
+                       dwceqos_read(lp, DWC_MMC_TXLATECOL);
+       if (tx_mask & BIT(16))
+               lp->mmc_counters.txdeferred +=
+                       dwceqos_read(lp, DWC_MMC_TXDEFERRED);
+       if (tx_mask & BIT(15))
+               lp->mmc_counters.txmulticol_g +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
+       if (tx_mask & BIT(14))
+               lp->mmc_counters.txsinglecol_g +=
+                       dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
+       if (tx_mask & BIT(13))
+               lp->mmc_counters.txunderflowerror +=
+                       dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
+       if (tx_mask & BIT(12))
+               lp->mmc_counters.txbroadcastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
+       if (tx_mask & BIT(11))
+               lp->mmc_counters.txmulticastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
+       if (tx_mask & BIT(10))
+               lp->mmc_counters.txunicastpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
+       if (tx_mask & BIT(9))
+               lp->mmc_counters.tx1024tomaxoctets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
+       if (tx_mask & BIT(8))
+               lp->mmc_counters.tx512to1023octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
+       if (tx_mask & BIT(7))
+               lp->mmc_counters.tx256to511octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
+       if (tx_mask & BIT(6))
+               lp->mmc_counters.tx128to255octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
+       if (tx_mask & BIT(5))
+               lp->mmc_counters.tx65to127octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
+       if (tx_mask & BIT(4))
+               lp->mmc_counters.tx64octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
+       if (tx_mask & BIT(3))
+               lp->mmc_counters.txmulticastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
+       if (tx_mask & BIT(2))
+               lp->mmc_counters.txbroadcastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
+       if (tx_mask & BIT(1))
+               lp->mmc_counters.txpacketcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
+       if (tx_mask & BIT(0))
+               lp->mmc_counters.txoctetcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
+
+       if (rx_mask & BIT(27))
+               lp->mmc_counters.rxlpitranscntr +=
+                       dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
+       if (rx_mask & BIT(26))
+               lp->mmc_counters.rxlpiuscntr +=
+                       dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
+       if (rx_mask & BIT(25))
+               lp->mmc_counters.rxctrlpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
+       if (rx_mask & BIT(24))
+               lp->mmc_counters.rxrcverror +=
+                       dwceqos_read(lp, DWC_MMC_RXRCVERROR);
+       if (rx_mask & BIT(23))
+               lp->mmc_counters.rxwatchdog +=
+                       dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
+       if (rx_mask & BIT(22))
+               lp->mmc_counters.rxvlanpackets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
+       if (rx_mask & BIT(21))
+               lp->mmc_counters.rxfifooverflow +=
+                       dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
+       if (rx_mask & BIT(20))
+               lp->mmc_counters.rxpausepackets +=
+                       dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
+       if (rx_mask & BIT(19))
+               lp->mmc_counters.rxoutofrangetype +=
+                       dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
+       if (rx_mask & BIT(18))
+               lp->mmc_counters.rxlengtherror +=
+                       dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
+       if (rx_mask & BIT(17))
+               lp->mmc_counters.rxunicastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
+       if (rx_mask & BIT(16))
+               lp->mmc_counters.rx1024tomaxoctets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
+       if (rx_mask & BIT(15))
+               lp->mmc_counters.rx512to1023octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
+       if (rx_mask & BIT(14))
+               lp->mmc_counters.rx256to511octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
+       if (rx_mask & BIT(13))
+               lp->mmc_counters.rx128to255octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
+       if (rx_mask & BIT(12))
+               lp->mmc_counters.rx65to127octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
+       if (rx_mask & BIT(11))
+               lp->mmc_counters.rx64octets_gb +=
+                       dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
+       if (rx_mask & BIT(10))
+               lp->mmc_counters.rxoversize_g +=
+                       dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
+       if (rx_mask & BIT(9))
+               lp->mmc_counters.rxundersize_g +=
+                       dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
+       if (rx_mask & BIT(8))
+               lp->mmc_counters.rxjabbererror +=
+                       dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
+       if (rx_mask & BIT(7))
+               lp->mmc_counters.rxrunterror +=
+                       dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
+       if (rx_mask & BIT(6))
+               lp->mmc_counters.rxalignmenterror +=
+                       dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
+       if (rx_mask & BIT(5))
+               lp->mmc_counters.rxcrcerror +=
+                       dwceqos_read(lp, DWC_MMC_RXCRCERROR);
+       if (rx_mask & BIT(4))
+               lp->mmc_counters.rxmulticastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
+       if (rx_mask & BIT(3))
+               lp->mmc_counters.rxbroadcastpackets_g +=
+                       dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
+       if (rx_mask & BIT(2))
+               lp->mmc_counters.rxoctetcount_g +=
+                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
+       if (rx_mask & BIT(1))
+               lp->mmc_counters.rxoctetcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
+       if (rx_mask & BIT(0))
+               lp->mmc_counters.rxpacketcount_gb +=
+                       dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
+}
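+
+/* Each BIT(n) in rx_mask/tx_mask selects one 32-bit hardware MMC counter
+ * register.  Accumulating the reads into the wider software counters
+ * preserves running totals across hardware counter resets (assuming the
+ * MMC block is configured elsewhere in the driver to reset its counters
+ * on read).
+ */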
+
+static struct rtnl_link_stats64 *
+dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
+{
+       unsigned long flags;
+       struct net_local *lp = netdev_priv(ndev);
+       struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+                                 lp->mmc_tx_counters_mask);
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+       s->rx_packets = hwstats->rxpacketcount_gb;
+       s->rx_bytes = hwstats->rxoctetcount_gb;
+       s->rx_errors = hwstats->rxpacketcount_gb -
+               hwstats->rxbroadcastpackets_g -
+               hwstats->rxmulticastpackets_g -
+               hwstats->rxunicastpackets_g;
+       s->multicast = hwstats->rxmulticastpackets_g;
+       s->rx_length_errors = hwstats->rxlengtherror;
+       s->rx_crc_errors = hwstats->rxcrcerror;
+       s->rx_fifo_errors = hwstats->rxfifooverflow;
+
+       s->tx_packets = hwstats->txpacketcount_gb;
+       s->tx_bytes = hwstats->txoctetcount_gb;
+
+       if (lp->mmc_tx_counters_mask & BIT(21))
+               s->tx_errors = hwstats->txpacketcount_gb -
+                       hwstats->txpacketcount_g;
+       else
+               s->tx_errors = hwstats->txunderflowerror +
+                       hwstats->txcarriererror;
+
+       return s;
+}
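+
+/* rx_errors above is derived by subtraction: the _gb counters count all
+ * frames, good and bad, while the unicast/multicast/broadcast _g
+ * counters count only good frames, so the difference is the number of
+ * bad frames received.
+ */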
+
+static int
+dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_gset(phydev, ecmd);
+}
+
+static int
+dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       struct phy_device *phydev = lp->phy_dev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return phy_ethtool_sset(phydev, ecmd);
+}
+
+static void
+dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       strlcpy(ed->driver, lp->pdev->dev.driver->name, sizeof(ed->driver));
+       strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+}
+
+static void dwceqos_get_pauseparam(struct net_device *ndev,
+                                  struct ethtool_pauseparam *pp)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       pp->autoneg = lp->flowcontrol.autoneg;
+       pp->tx_pause = lp->flowcontrol.tx;
+       pp->rx_pause = lp->flowcontrol.rx;
+}
+
+static int dwceqos_set_pauseparam(struct net_device *ndev,
+                                 struct ethtool_pauseparam *pp)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       int ret = 0;
+
+       lp->flowcontrol.autoneg = pp->autoneg;
+       if (pp->autoneg) {
+               lp->phy_dev->advertising |= ADVERTISED_Pause;
+               lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+       } else {
+               lp->phy_dev->advertising &= ~ADVERTISED_Pause;
+               lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+               lp->flowcontrol.rx = pp->rx_pause;
+               lp->flowcontrol.tx = pp->tx_pause;
+       }
+
+       if (netif_running(ndev))
+               ret = phy_start_aneg(lp->phy_dev);
+
+       return ret;
+}
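+
+/* When pause autonegotiation is requested, the rx/tx pause values from
+ * user space are not stored here; the resolved flow-control mode comes
+ * from the PHY negotiation restarted via phy_start_aneg() above.
+ */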
+
+static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
+                               u8 *data)
+{
+       size_t i;
+
+       if (stringset != ETH_SS_STATS)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+               memcpy(data, dwceqos_ethtool_stats[i].stat_name,
+                      ETH_GSTRING_LEN);
+               data += ETH_GSTRING_LEN;
+       }
+}
+
+static void dwceqos_get_ethtool_stats(struct net_device *ndev,
+                                     struct ethtool_stats *stats, u64 *data)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       unsigned long flags;
+       size_t i;
+       u8 *mmcstat = (u8 *)&lp->mmc_counters;
+
+       spin_lock_irqsave(&lp->stats_lock, flags);
+       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
+                                 lp->mmc_tx_counters_mask);
+       spin_unlock_irqrestore(&lp->stats_lock, flags);
+
+       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
+               memcpy(data,
+                      mmcstat + dwceqos_ethtool_stats[i].offset,
+                      sizeof(u64));
+               data++;
+       }
+}
+
+static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ARRAY_SIZE(dwceqos_ethtool_stats);
+
+       return -EOPNOTSUPP;
+}
+
+static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                            void *space)
+{
+       const struct net_local *lp = netdev_priv(dev);
+       u32 *reg_space = (u32 *)space;
+       int reg_offset;
+       int reg_ix = 0;
+
+       /* MAC registers */
+       for (reg_offset = START_MAC_REG_OFFSET;
+               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+       /* MTL registers */
+       for (reg_offset = START_MTL_REG_OFFSET;
+               reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+
+       /* DMA registers */
+       for (reg_offset = START_DMA_REG_OFFSET;
+               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
+               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
+               reg_ix++;
+       }
+
+       BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
+}
+
+static int dwceqos_get_regs_len(struct net_device *dev)
+{
+       return REG_SPACE_SIZE;
+}
+
+static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
+{
+       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
+}
+
+static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
+{
+       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
+}
+
+static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 lpi_status;
+       u32 lpi_enabled;
+
+       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+               return -EOPNOTSUPP;
+
+       edata->eee_active  = lp->eee_active;
+       edata->eee_enabled = lp->eee_enabled;
+       edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
+       lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+       lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
+       edata->tx_lpi_enabled = lpi_enabled;
+
+       if (netif_msg_hw(lp)) {
+               u32 regval;
+
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+
+               netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
+                           dwceqos_get_rx_lpi_state(regval),
+                           dwceqos_get_tx_lpi_state(regval));
+       }
+
+       return phy_ethtool_get_eee(lp->phy_dev, edata);
+}
+
+static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+       struct net_local *lp = netdev_priv(ndev);
+       u32 regval;
+       unsigned long flags;
+
+       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
+               return -EOPNOTSUPP;
+
+       if (edata->eee_enabled && !lp->eee_active)
+               return -EOPNOTSUPP;
+
+       if (edata->tx_lpi_enabled) {
+               if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
+                   edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
+                       return -EINVAL;
+       }
+
+       lp->eee_enabled = edata->eee_enabled;
+
+       if (edata->eee_enabled && edata->tx_lpi_enabled) {
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
+                             edata->tx_lpi_timer);
+
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               if (lp->en_tx_lpi_clockgating)
+                       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       } else {
+               spin_lock_irqsave(&lp->hw_lock, flags);
+               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
+               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
+               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
+               spin_unlock_irqrestore(&lp->hw_lock, flags);
+       }
+
+       return phy_ethtool_set_eee(lp->phy_dev, edata);
+}
+
+static u32 dwceqos_get_msglevel(struct net_device *ndev)
+{
+       const struct net_local *lp = netdev_priv(ndev);
+
+       return lp->msg_enable;
+}
+
+static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
+{
+       struct net_local *lp = netdev_priv(ndev);
+
+       lp->msg_enable = msglevel;
+}
+
+static const struct ethtool_ops dwceqos_ethtool_ops = {
+       .get_settings   = dwceqos_get_settings,
+       .set_settings   = dwceqos_set_settings,
+       .get_drvinfo    = dwceqos_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_pauseparam = dwceqos_get_pauseparam,
+       .set_pauseparam = dwceqos_set_pauseparam,
+       .get_strings    = dwceqos_get_strings,
+       .get_ethtool_stats = dwceqos_get_ethtool_stats,
+       .get_sset_count = dwceqos_get_sset_count,
+       .get_regs       = dwceqos_get_regs,
+       .get_regs_len   = dwceqos_get_regs_len,
+       .get_eee        = dwceqos_get_eee,
+       .set_eee        = dwceqos_set_eee,
+       .get_msglevel   = dwceqos_get_msglevel,
+       .set_msglevel   = dwceqos_set_msglevel,
+};
+
+static const struct net_device_ops netdev_ops = {
+       .ndo_open               = dwceqos_open,
+       .ndo_stop               = dwceqos_stop,
+       .ndo_start_xmit         = dwceqos_start_xmit,
+       .ndo_set_rx_mode        = dwceqos_set_rx_mode,
+       .ndo_set_mac_address    = dwceqos_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = dwceqos_poll_controller,
+#endif
+       .ndo_do_ioctl           = dwceqos_ioctl,
+       .ndo_tx_timeout         = dwceqos_tx_timeout,
+       .ndo_get_stats64        = dwceqos_get_stats64,
+};
+
+static const struct of_device_id dwceq_of_match[] = {
+       { .compatible = "snps,dwc-qos-ethernet-4.10", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, dwceq_of_match);
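+
+/* A minimal device-tree node that would match this driver, inferred from
+ * the compatible string above and the resources requested by
+ * dwceqos_probe() below; addresses, the interrupt specifier and the
+ * phandles are placeholders:
+ *
+ *     ethernet@e0000000 {
+ *             compatible = "snps,dwc-qos-ethernet-4.10";
+ *             reg = <0xe0000000 0x1000>;
+ *             interrupts = <0 26 4>;
+ *             clocks = <&clk_phy>, <&clk_apb>;
+ *             clock-names = "phy_ref_clk", "apb_pclk";
+ *             phy-mode = "rgmii";
+ *             phy-handle = <&phy0>;
+ *     };
+ *
+ * See Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
+ * for the authoritative binding.
+ */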
+
+static int dwceqos_probe(struct platform_device *pdev)
+{
+       struct resource *r_mem = NULL;
+       struct net_device *ndev;
+       struct net_local *lp;
+       int ret = -ENXIO;
+
+       r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!r_mem) {
+               dev_err(&pdev->dev, "no IO resource defined.\n");
+               return -ENXIO;
+       }
+
+       ndev = alloc_etherdev(sizeof(*lp));
+       if (!ndev) {
+               dev_err(&pdev->dev, "etherdev allocation failed.\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+
+       lp = netdev_priv(ndev);
+       lp->ndev = ndev;
+       lp->pdev = pdev;
+       lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
+
+       spin_lock_init(&lp->tx_lock);
+       spin_lock_init(&lp->hw_lock);
+       spin_lock_init(&lp->stats_lock);
+
+       lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+       if (IS_ERR(lp->apb_pclk)) {
+               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+               ret = PTR_ERR(lp->apb_pclk);
+               goto err_out_free_netdev;
+       }
+
+       ret = clk_prepare_enable(lp->apb_pclk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable APER clock.\n");
+               goto err_out_free_netdev;
+       }
+
+       lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
+       if (IS_ERR(lp->baseaddr)) {
+               dev_err(&pdev->dev, "failed to map baseaddress.\n");
+               ret = PTR_ERR(lp->baseaddr);
+               goto err_out_clk_dis_aper;
+       }
+
+       ndev->irq = platform_get_irq(pdev, 0);
+       ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
+       ndev->netdev_ops = &netdev_ops;
+       ndev->ethtool_ops = &dwceqos_ethtool_ops;
+       ndev->base_addr = r_mem->start;
+
+       dwceqos_get_hwfeatures(lp);
+       dwceqos_mdio_set_csr(lp);
+
+       ndev->hw_features = NETIF_F_SG;
+
+       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
+               ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
+               ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
+               ndev->hw_features |= NETIF_F_RXCSUM;
+
+       ndev->features = ndev->hw_features;
+
+       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+       ret = register_netdev(ndev);
+       if (ret) {
+               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+               goto err_out_clk_dis_aper;
+       }
+
+       lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+       if (IS_ERR(lp->phy_ref_clk)) {
+               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+               ret = PTR_ERR(lp->phy_ref_clk);
+               goto err_out_unregister_netdev;
+       }
+
+       ret = clk_prepare_enable(lp->phy_ref_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Unable to enable device clock.\n");
+               goto err_out_unregister_netdev;
+       }
+
+       lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
+                                               "phy-handle", 0);
+       if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
+               ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "invalid fixed-link");
+                       goto err_out_unregister_netdev;
+               }
+
+               lp->phy_node = of_node_get(lp->pdev->dev.of_node);
+       }
+
+       ret = of_get_phy_mode(lp->pdev->dev.of_node);
+       if (ret < 0) {
+               dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
+               goto err_out_unregister_clk_notifier;
+       }
+
+       lp->phy_interface = ret;
+
+       ret = dwceqos_mii_init(lp);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
+               goto err_out_unregister_clk_notifier;
+       }
+
+       ret = dwceqos_mii_probe(ndev);
+       if (ret != 0) {
+               netdev_err(ndev, "mii_probe fail.\n");
+               ret = -ENXIO;
+               goto err_out_unregister_clk_notifier;
+       }
+
+       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
+
+       tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
+                    (unsigned long)ndev);
+       tasklet_disable(&lp->tx_bdreclaim_tasklet);
+
+       lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+       INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
+
+       platform_set_drvdata(pdev, ndev);
+       ret = dwceqos_probe_config_dt(pdev);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
+                       ret);
+               goto err_out_unregister_clk_notifier;
+       }
+       dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
+                pdev->id, ndev->base_addr, ndev->irq);
+
+       ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
+                              ndev->name, ndev);
+       if (ret) {
+               dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
+                       ndev->irq, ret);
+               goto err_out_unregister_clk_notifier;
+       }
+
+       if (netif_msg_probe(lp))
+               netdev_dbg(ndev, "net_local@%p\n", lp);
+
+       return 0;
+
+err_out_unregister_clk_notifier:
+       clk_disable_unprepare(lp->phy_ref_clk);
+err_out_unregister_netdev:
+       unregister_netdev(ndev);
+err_out_clk_dis_aper:
+       clk_disable_unprepare(lp->apb_pclk);
+err_out_free_netdev:
+       if (lp->phy_node)
+               of_node_put(lp->phy_node);
+       free_netdev(ndev);
+       platform_set_drvdata(pdev, NULL);
+       return ret;
+}
+
+static int dwceqos_remove(struct platform_device *pdev)
+{
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct net_local *lp;
+
+       if (ndev) {
+               lp = netdev_priv(ndev);
+
+               if (lp->phy_dev)
+                       phy_disconnect(lp->phy_dev);
+               mdiobus_unregister(lp->mii_bus);
+               kfree(lp->mii_bus->irq);
+               mdiobus_free(lp->mii_bus);
+
+               unregister_netdev(ndev);
+
+               clk_disable_unprepare(lp->phy_ref_clk);
+               clk_disable_unprepare(lp->apb_pclk);
+
+               free_netdev(ndev);
+       }
+
+       return 0;
+}
+
+static struct platform_driver dwceqos_driver = {
+       .probe   = dwceqos_probe,
+       .remove  = dwceqos_remove,
+       .driver  = {
+               .name  = DRIVER_NAME,
+               .of_match_table = dwceq_of_match,
+       },
+};
+
+module_platform_driver(dwceqos_driver);
+
+MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
+MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
index 9749dfd78c434992f4041effc3c9a7314bdace3d..29ae672917b7b3d8c9324b6a77b64193e903b14f 100644 (file)
@@ -51,6 +51,8 @@
                    NETIF_MSG_PKTDATA   | NETIF_MSG_TX_QUEUED   |       \
                    NETIF_MSG_RX_STATUS)
 
+#define NETCP_EFUSE_ADDR_SWAP  2
+
 #define knav_queue_get_id(q)   knav_queue_device_control(q, \
                                KNAV_QUEUE_GET_ID, (unsigned long)NULL)
 
@@ -172,13 +174,22 @@ static void set_words(u32 *words, int num_words, u32 *desc)
 }
 
 /* Read the e-fuse value as 32 bit values to be endian independent */
-static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
 {
        unsigned int addr0, addr1;
 
        addr1 = readl(efuse_mac + 4);
        addr0 = readl(efuse_mac);
 
+       switch (swap) {
+       case NETCP_EFUSE_ADDR_SWAP:
+               addr0 = addr1;
+               addr1 = readl(efuse_mac);
+               break;
+       default:
+               break;
+       }
+
        x[0] = (addr1 & 0x0000ff00) >> 8;
        x[1] = addr1 & 0x000000ff;
        x[2] = (addr0 & 0xff000000) >> 24;
@@ -1902,7 +1913,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
                        goto quit;
                }
 
-               emac_arch_get_mac_addr(efuse_mac_addr, efuse);
+               emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
                if (is_valid_ether_addr(efuse_mac_addr))
                        ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
                else
@@ -2150,7 +2161,6 @@ MODULE_DEVICE_TABLE(of, of_match);
 static struct platform_driver netcp_driver = {
        .driver = {
                .name           = "netcp-1.0",
-               .owner          = THIS_MODULE,
                .of_match_table = of_match,
        },
        .probe = netcp_probe,
index 1974a8ae764aba6cb81e039239df68e91a58585b..6f16d6aaf7b76cdf5da3445b1a7c639f04e46200 100644 (file)
@@ -295,8 +295,6 @@ struct xgbe_hw_stats {
        u32     rx_dma_overruns;
 };
 
-#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
-
 struct gbenu_ss_regs {
        u32     id_ver;
        u32     synce_count;            /* NU */
@@ -480,7 +478,6 @@ struct gbenu_hw_stats {
        u32     tx_pri7_drop_bcnt;
 };
 
-#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
 #define GBENU_HW_STATS_REG_MAP_SZ      0x200
 
 struct gbe_ss_regs {
@@ -615,7 +612,6 @@ struct gbe_hw_stats {
        u32     rx_dma_overruns;
 };
 
-#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
 #define GBE_MAX_HW_STAT_MODS                   9
 #define GBE_HW_STATS_REG_MAP_SZ                        0x100
 
@@ -646,6 +642,7 @@ struct gbe_priv {
        bool                            enable_ale;
        u8                              max_num_slaves;
        u8                              max_num_ports; /* max_num_slaves + 1 */
+       u8                              num_stats_mods;
        struct netcp_tx_pipe            tx_pipe;
 
        int                             host_port;
@@ -675,6 +672,7 @@ struct gbe_priv {
        struct net_device               *dummy_ndev;
 
        u64                             *hw_stats;
+       u32                             *hw_stats_prev;
        const struct netcp_ethtool_stat *et_stats;
        int                             num_et_stats;
        /*  Lock for updating the hwstats */
@@ -874,7 +872,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
 };
 
 /* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_HOST_SIZE       33
+#define GBENU_ET_STATS_HOST_SIZE       52
 
 #define GBENU_STATS_HOST(field)                                        \
 {                                                              \
@@ -883,8 +881,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = {
        offsetof(struct gbenu_hw_stats, field)                  \
 }
 
-/* This is the size of entries in GBENU_STATS_HOST */
-#define GBENU_ET_STATS_PORT_SIZE       46
+/* This is the size of entries in GBENU_STATS_PORT */
+#define GBENU_ET_STATS_PORT_SIZE       65
 
 #define GBENU_STATS_P1(field)                                  \
 {                                                              \
@@ -976,7 +974,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_HOST(ale_unknown_mcast_bytes),
        GBENU_STATS_HOST(ale_unknown_bcast),
        GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+       GBENU_STATS_HOST(ale_pol_match),
+       GBENU_STATS_HOST(ale_pol_match_red),
+       GBENU_STATS_HOST(ale_pol_match_yellow),
        GBENU_STATS_HOST(tx_mem_protect_err),
+       GBENU_STATS_HOST(tx_pri0_drop),
+       GBENU_STATS_HOST(tx_pri1_drop),
+       GBENU_STATS_HOST(tx_pri2_drop),
+       GBENU_STATS_HOST(tx_pri3_drop),
+       GBENU_STATS_HOST(tx_pri4_drop),
+       GBENU_STATS_HOST(tx_pri5_drop),
+       GBENU_STATS_HOST(tx_pri6_drop),
+       GBENU_STATS_HOST(tx_pri7_drop),
+       GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+       GBENU_STATS_HOST(tx_pri7_drop_bcnt),
        /* GBENU Module 1 */
        GBENU_STATS_P1(rx_good_frames),
        GBENU_STATS_P1(rx_broadcast_frames),
@@ -1023,7 +1040,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P1(ale_unknown_mcast_bytes),
        GBENU_STATS_P1(ale_unknown_bcast),
        GBENU_STATS_P1(ale_unknown_bcast_bytes),
+       GBENU_STATS_P1(ale_pol_match),
+       GBENU_STATS_P1(ale_pol_match_red),
+       GBENU_STATS_P1(ale_pol_match_yellow),
        GBENU_STATS_P1(tx_mem_protect_err),
+       GBENU_STATS_P1(tx_pri0_drop),
+       GBENU_STATS_P1(tx_pri1_drop),
+       GBENU_STATS_P1(tx_pri2_drop),
+       GBENU_STATS_P1(tx_pri3_drop),
+       GBENU_STATS_P1(tx_pri4_drop),
+       GBENU_STATS_P1(tx_pri5_drop),
+       GBENU_STATS_P1(tx_pri6_drop),
+       GBENU_STATS_P1(tx_pri7_drop),
+       GBENU_STATS_P1(tx_pri0_drop_bcnt),
+       GBENU_STATS_P1(tx_pri1_drop_bcnt),
+       GBENU_STATS_P1(tx_pri2_drop_bcnt),
+       GBENU_STATS_P1(tx_pri3_drop_bcnt),
+       GBENU_STATS_P1(tx_pri4_drop_bcnt),
+       GBENU_STATS_P1(tx_pri5_drop_bcnt),
+       GBENU_STATS_P1(tx_pri6_drop_bcnt),
+       GBENU_STATS_P1(tx_pri7_drop_bcnt),
        /* GBENU Module 2 */
        GBENU_STATS_P2(rx_good_frames),
        GBENU_STATS_P2(rx_broadcast_frames),
@@ -1070,7 +1106,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P2(ale_unknown_mcast_bytes),
        GBENU_STATS_P2(ale_unknown_bcast),
        GBENU_STATS_P2(ale_unknown_bcast_bytes),
+       GBENU_STATS_P2(ale_pol_match),
+       GBENU_STATS_P2(ale_pol_match_red),
+       GBENU_STATS_P2(ale_pol_match_yellow),
        GBENU_STATS_P2(tx_mem_protect_err),
+       GBENU_STATS_P2(tx_pri0_drop),
+       GBENU_STATS_P2(tx_pri1_drop),
+       GBENU_STATS_P2(tx_pri2_drop),
+       GBENU_STATS_P2(tx_pri3_drop),
+       GBENU_STATS_P2(tx_pri4_drop),
+       GBENU_STATS_P2(tx_pri5_drop),
+       GBENU_STATS_P2(tx_pri6_drop),
+       GBENU_STATS_P2(tx_pri7_drop),
+       GBENU_STATS_P2(tx_pri0_drop_bcnt),
+       GBENU_STATS_P2(tx_pri1_drop_bcnt),
+       GBENU_STATS_P2(tx_pri2_drop_bcnt),
+       GBENU_STATS_P2(tx_pri3_drop_bcnt),
+       GBENU_STATS_P2(tx_pri4_drop_bcnt),
+       GBENU_STATS_P2(tx_pri5_drop_bcnt),
+       GBENU_STATS_P2(tx_pri6_drop_bcnt),
+       GBENU_STATS_P2(tx_pri7_drop_bcnt),
        /* GBENU Module 3 */
        GBENU_STATS_P3(rx_good_frames),
        GBENU_STATS_P3(rx_broadcast_frames),
@@ -1117,7 +1172,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P3(ale_unknown_mcast_bytes),
        GBENU_STATS_P3(ale_unknown_bcast),
        GBENU_STATS_P3(ale_unknown_bcast_bytes),
+       GBENU_STATS_P3(ale_pol_match),
+       GBENU_STATS_P3(ale_pol_match_red),
+       GBENU_STATS_P3(ale_pol_match_yellow),
        GBENU_STATS_P3(tx_mem_protect_err),
+       GBENU_STATS_P3(tx_pri0_drop),
+       GBENU_STATS_P3(tx_pri1_drop),
+       GBENU_STATS_P3(tx_pri2_drop),
+       GBENU_STATS_P3(tx_pri3_drop),
+       GBENU_STATS_P3(tx_pri4_drop),
+       GBENU_STATS_P3(tx_pri5_drop),
+       GBENU_STATS_P3(tx_pri6_drop),
+       GBENU_STATS_P3(tx_pri7_drop),
+       GBENU_STATS_P3(tx_pri0_drop_bcnt),
+       GBENU_STATS_P3(tx_pri1_drop_bcnt),
+       GBENU_STATS_P3(tx_pri2_drop_bcnt),
+       GBENU_STATS_P3(tx_pri3_drop_bcnt),
+       GBENU_STATS_P3(tx_pri4_drop_bcnt),
+       GBENU_STATS_P3(tx_pri5_drop_bcnt),
+       GBENU_STATS_P3(tx_pri6_drop_bcnt),
+       GBENU_STATS_P3(tx_pri7_drop_bcnt),
        /* GBENU Module 4 */
        GBENU_STATS_P4(rx_good_frames),
        GBENU_STATS_P4(rx_broadcast_frames),
@@ -1164,7 +1238,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P4(ale_unknown_mcast_bytes),
        GBENU_STATS_P4(ale_unknown_bcast),
        GBENU_STATS_P4(ale_unknown_bcast_bytes),
+       GBENU_STATS_P4(ale_pol_match),
+       GBENU_STATS_P4(ale_pol_match_red),
+       GBENU_STATS_P4(ale_pol_match_yellow),
        GBENU_STATS_P4(tx_mem_protect_err),
+       GBENU_STATS_P4(tx_pri0_drop),
+       GBENU_STATS_P4(tx_pri1_drop),
+       GBENU_STATS_P4(tx_pri2_drop),
+       GBENU_STATS_P4(tx_pri3_drop),
+       GBENU_STATS_P4(tx_pri4_drop),
+       GBENU_STATS_P4(tx_pri5_drop),
+       GBENU_STATS_P4(tx_pri6_drop),
+       GBENU_STATS_P4(tx_pri7_drop),
+       GBENU_STATS_P4(tx_pri0_drop_bcnt),
+       GBENU_STATS_P4(tx_pri1_drop_bcnt),
+       GBENU_STATS_P4(tx_pri2_drop_bcnt),
+       GBENU_STATS_P4(tx_pri3_drop_bcnt),
+       GBENU_STATS_P4(tx_pri4_drop_bcnt),
+       GBENU_STATS_P4(tx_pri5_drop_bcnt),
+       GBENU_STATS_P4(tx_pri6_drop_bcnt),
+       GBENU_STATS_P4(tx_pri7_drop_bcnt),
        /* GBENU Module 5 */
        GBENU_STATS_P5(rx_good_frames),
        GBENU_STATS_P5(rx_broadcast_frames),
@@ -1211,7 +1304,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P5(ale_unknown_mcast_bytes),
        GBENU_STATS_P5(ale_unknown_bcast),
        GBENU_STATS_P5(ale_unknown_bcast_bytes),
+       GBENU_STATS_P5(ale_pol_match),
+       GBENU_STATS_P5(ale_pol_match_red),
+       GBENU_STATS_P5(ale_pol_match_yellow),
        GBENU_STATS_P5(tx_mem_protect_err),
+       GBENU_STATS_P5(tx_pri0_drop),
+       GBENU_STATS_P5(tx_pri1_drop),
+       GBENU_STATS_P5(tx_pri2_drop),
+       GBENU_STATS_P5(tx_pri3_drop),
+       GBENU_STATS_P5(tx_pri4_drop),
+       GBENU_STATS_P5(tx_pri5_drop),
+       GBENU_STATS_P5(tx_pri6_drop),
+       GBENU_STATS_P5(tx_pri7_drop),
+       GBENU_STATS_P5(tx_pri0_drop_bcnt),
+       GBENU_STATS_P5(tx_pri1_drop_bcnt),
+       GBENU_STATS_P5(tx_pri2_drop_bcnt),
+       GBENU_STATS_P5(tx_pri3_drop_bcnt),
+       GBENU_STATS_P5(tx_pri4_drop_bcnt),
+       GBENU_STATS_P5(tx_pri5_drop_bcnt),
+       GBENU_STATS_P5(tx_pri6_drop_bcnt),
+       GBENU_STATS_P5(tx_pri7_drop_bcnt),
        /* GBENU Module 6 */
        GBENU_STATS_P6(rx_good_frames),
        GBENU_STATS_P6(rx_broadcast_frames),
@@ -1258,7 +1370,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P6(ale_unknown_mcast_bytes),
        GBENU_STATS_P6(ale_unknown_bcast),
        GBENU_STATS_P6(ale_unknown_bcast_bytes),
+       GBENU_STATS_P6(ale_pol_match),
+       GBENU_STATS_P6(ale_pol_match_red),
+       GBENU_STATS_P6(ale_pol_match_yellow),
        GBENU_STATS_P6(tx_mem_protect_err),
+       GBENU_STATS_P6(tx_pri0_drop),
+       GBENU_STATS_P6(tx_pri1_drop),
+       GBENU_STATS_P6(tx_pri2_drop),
+       GBENU_STATS_P6(tx_pri3_drop),
+       GBENU_STATS_P6(tx_pri4_drop),
+       GBENU_STATS_P6(tx_pri5_drop),
+       GBENU_STATS_P6(tx_pri6_drop),
+       GBENU_STATS_P6(tx_pri7_drop),
+       GBENU_STATS_P6(tx_pri0_drop_bcnt),
+       GBENU_STATS_P6(tx_pri1_drop_bcnt),
+       GBENU_STATS_P6(tx_pri2_drop_bcnt),
+       GBENU_STATS_P6(tx_pri3_drop_bcnt),
+       GBENU_STATS_P6(tx_pri4_drop_bcnt),
+       GBENU_STATS_P6(tx_pri5_drop_bcnt),
+       GBENU_STATS_P6(tx_pri6_drop_bcnt),
+       GBENU_STATS_P6(tx_pri7_drop_bcnt),
        /* GBENU Module 7 */
        GBENU_STATS_P7(rx_good_frames),
        GBENU_STATS_P7(rx_broadcast_frames),
@@ -1305,7 +1436,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P7(ale_unknown_mcast_bytes),
        GBENU_STATS_P7(ale_unknown_bcast),
        GBENU_STATS_P7(ale_unknown_bcast_bytes),
+       GBENU_STATS_P7(ale_pol_match),
+       GBENU_STATS_P7(ale_pol_match_red),
+       GBENU_STATS_P7(ale_pol_match_yellow),
        GBENU_STATS_P7(tx_mem_protect_err),
+       GBENU_STATS_P7(tx_pri0_drop),
+       GBENU_STATS_P7(tx_pri1_drop),
+       GBENU_STATS_P7(tx_pri2_drop),
+       GBENU_STATS_P7(tx_pri3_drop),
+       GBENU_STATS_P7(tx_pri4_drop),
+       GBENU_STATS_P7(tx_pri5_drop),
+       GBENU_STATS_P7(tx_pri6_drop),
+       GBENU_STATS_P7(tx_pri7_drop),
+       GBENU_STATS_P7(tx_pri0_drop_bcnt),
+       GBENU_STATS_P7(tx_pri1_drop_bcnt),
+       GBENU_STATS_P7(tx_pri2_drop_bcnt),
+       GBENU_STATS_P7(tx_pri3_drop_bcnt),
+       GBENU_STATS_P7(tx_pri4_drop_bcnt),
+       GBENU_STATS_P7(tx_pri5_drop_bcnt),
+       GBENU_STATS_P7(tx_pri6_drop_bcnt),
+       GBENU_STATS_P7(tx_pri7_drop_bcnt),
        /* GBENU Module 8 */
        GBENU_STATS_P8(rx_good_frames),
        GBENU_STATS_P8(rx_broadcast_frames),
@@ -1352,7 +1502,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = {
        GBENU_STATS_P8(ale_unknown_mcast_bytes),
        GBENU_STATS_P8(ale_unknown_bcast),
        GBENU_STATS_P8(ale_unknown_bcast_bytes),
+       GBENU_STATS_P8(ale_pol_match),
+       GBENU_STATS_P8(ale_pol_match_red),
+       GBENU_STATS_P8(ale_pol_match_yellow),
        GBENU_STATS_P8(tx_mem_protect_err),
+       GBENU_STATS_P8(tx_pri0_drop),
+       GBENU_STATS_P8(tx_pri1_drop),
+       GBENU_STATS_P8(tx_pri2_drop),
+       GBENU_STATS_P8(tx_pri3_drop),
+       GBENU_STATS_P8(tx_pri4_drop),
+       GBENU_STATS_P8(tx_pri5_drop),
+       GBENU_STATS_P8(tx_pri6_drop),
+       GBENU_STATS_P8(tx_pri7_drop),
+       GBENU_STATS_P8(tx_pri0_drop_bcnt),
+       GBENU_STATS_P8(tx_pri1_drop_bcnt),
+       GBENU_STATS_P8(tx_pri2_drop_bcnt),
+       GBENU_STATS_P8(tx_pri3_drop_bcnt),
+       GBENU_STATS_P8(tx_pri4_drop_bcnt),
+       GBENU_STATS_P8(tx_pri5_drop_bcnt),
+       GBENU_STATS_P8(tx_pri6_drop_bcnt),
+       GBENU_STATS_P8(tx_pri7_drop_bcnt),
 };
 
 #define XGBE_STATS0_INFO(field)                                \
@@ -1554,70 +1723,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset)
        }
 }
 
-static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+       void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+       u32  __iomem *p_stats_entry;
+       int i;
+
+       for (i = 0; i < gbe_dev->num_et_stats; i++) {
+               if (gbe_dev->et_stats[i].type == stats_mod) {
+                       p_stats_entry = base + gbe_dev->et_stats[i].offset;
+                       gbe_dev->hw_stats[i] = 0;
+                       gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+               }
+       }
+}
+
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+                                            int et_stats_entry)
 {
        void __iomem *base = NULL;
-       u32  __iomem *p;
-       u32 tmp = 0;
+       u32  __iomem *p_stats_entry;
+       u32 curr, delta;
+
+       /* The hw_stats_regs pointers are already
+        * properly set to point to the right base:
+        */
+       base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+       p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+       curr = readl(p_stats_entry);
+       delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+       gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+       gbe_dev->hw_stats[et_stats_entry] += delta;
+}
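+
+/* The u32 delta above is computed in 32-bit unsigned arithmetic on
+ * purpose, so it stays correct across a counter wraparound: with
+ * prev = 0xfffffff0 and curr = 0x00000010, curr - prev yields 0x20
+ * (modulo 2^32), provided the counter advanced by less than 2^32
+ * between two reads.
+ */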
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
        int i;
 
        for (i = 0; i < gbe_dev->num_et_stats; i++) {
-               base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
-               p = base + gbe_dev->et_stats[i].offset;
-               tmp = readl(p);
-               gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
+               gbe_update_hw_stats_entry(gbe_dev, i);
+
                if (data)
                        data[i] = gbe_dev->hw_stats[i];
-               /* write-to-decrement:
-                * new register value = old register value - write value
-                */
-               writel(tmp, p);
        }
 }
 
-static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+                                              int stats_mod)
 {
-       void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
-       void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
-       u64 *hw_stats = &gbe_dev->hw_stats[0];
-       void __iomem *base = NULL;
-       u32  __iomem *p;
-       u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
-       int i, j, pair;
+       u32 val;
 
-       for (pair = 0; pair < 2; pair++) {
-               val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+       val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
 
-               if (pair == 0)
-                       val &= ~GBE_STATS_CD_SEL;
-               else
-                       val |= GBE_STATS_CD_SEL;
+       switch (stats_mod) {
+       case GBE_STATSA_MODULE:
+       case GBE_STATSB_MODULE:
+               val &= ~GBE_STATS_CD_SEL;
+               break;
+       case GBE_STATSC_MODULE:
+       case GBE_STATSD_MODULE:
+               val |= GBE_STATS_CD_SEL;
+               break;
+       default:
+               return;
+       }
 
-               /* make the stat modules visible */
-               writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+       /* make the stat module visible */
+       writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
 
-               for (i = 0; i < pair_size; i++) {
-                       j = pair * pair_size + i;
-                       switch (gbe_dev->et_stats[j].type) {
-                       case GBE_STATSA_MODULE:
-                       case GBE_STATSC_MODULE:
-                               base = gbe_statsa;
-                       break;
-                       case GBE_STATSB_MODULE:
-                       case GBE_STATSD_MODULE:
-                               base  = gbe_statsb;
-                       break;
-                       }
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+       gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+       gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+       u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+       int et_entry, j, pair;
+
+       for (pair = 0; pair < 2; pair++) {
+               gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+                                                     GBE_STATSC_MODULE :
+                                                     GBE_STATSA_MODULE));
+
+               for (j = 0; j < half_num_et_stats; j++) {
+                       et_entry = pair * half_num_et_stats + j;
+                       gbe_update_hw_stats_entry(gbe_dev, et_entry);
 
-                       p = base + gbe_dev->et_stats[j].offset;
-                       tmp = readl(p);
-                       hw_stats[j] += tmp;
                        if (data)
-                               data[j] = hw_stats[j];
-                       /* write-to-decrement:
-                        * new register value = old register value - write value
-                        */
-                       writel(tmp, p);
+                               data[et_entry] = gbe_dev->hw_stats[et_entry];
                }
        }
 }
@@ -2207,14 +2403,15 @@ static void netcp_ethss_timer(unsigned long arg)
                netcp_ethss_update_link_state(gbe_dev, slave, NULL);
        }
 
-       spin_lock_bh(&gbe_dev->hw_stats_lock);
+       /* The timer callback already runs in BH context; no need to block BHs */
+       spin_lock(&gbe_dev->hw_stats_lock);
 
        if (gbe_dev->ss_version == GBE_SS_VERSION_14)
                gbe_update_stats_ver14(gbe_dev, NULL);
        else
                gbe_update_stats(gbe_dev, NULL);
 
-       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+       spin_unlock(&gbe_dev->hw_stats_lock);
 
        gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
        add_timer(&gbe_dev->timer);
@@ -2571,15 +2768,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        }
        gbe_dev->xgbe_serdes_regs = regs;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+       gbe_dev->et_stats = xgbe10_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                 XGBE10_NUM_STAT_ENTRIES *
-                                 (gbe_dev->max_num_ports) * sizeof(u64),
-                                 GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        gbe_dev->ss_version = XGBE_SS_VERSION_10;
        gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
                                        XGBE10_SGMII_MODULE_OFFSET;
@@ -2593,8 +2803,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
        gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = xgbe10_et_stats;
-       gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
        gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
        /* Subsystem registers */
@@ -2679,30 +2887,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
        }
        gbe_dev->switch_regs = regs;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+       gbe_dev->et_stats = gbe13_et_stats;
+       gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                         GBE13_NUM_HW_STAT_ENTRIES *
-                                         gbe_dev->max_num_slaves * sizeof(u64),
-                                         GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
        gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
 
+       /* K2HK has only 2 hw stats modules visible at a time, so
+        * modules 0 & 2 point to one base and
+        * modules 1 & 3 point to the other base
+        */
        for (i = 0; i < gbe_dev->max_num_slaves; i++) {
                gbe_dev->hw_stats_regs[i] =
                        gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
-                       (GBE_HW_STATS_REG_MAP_SZ * i);
+                       (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
        }
 
        gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBE13_HOST_PORT_NUM;
        gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = gbe13_et_stats;
-       gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
        gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
 
        /* Subsystem registers */
@@ -2729,15 +2952,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
        void __iomem *regs;
        int i, ret;
 
+       gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+       gbe_dev->et_stats = gbenu_et_stats;
+
+       if (IS_SS_ID_NU(gbe_dev))
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                       (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+       else
+               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+                                       GBENU_ET_STATS_PORT_SIZE;
+
        gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
-                                 GBENU_NUM_HW_STAT_ENTRIES *
-                                 (gbe_dev->max_num_ports) * sizeof(u64),
-                                 GFP_KERNEL);
+                                        gbe_dev->num_et_stats * sizeof(u64),
+                                        GFP_KERNEL);
        if (!gbe_dev->hw_stats) {
                dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
                return -ENOMEM;
        }
 
+       gbe_dev->hw_stats_prev =
+               devm_kzalloc(gbe_dev->dev,
+                            gbe_dev->num_et_stats * sizeof(u32),
+                            GFP_KERNEL);
+       if (!gbe_dev->hw_stats_prev) {
+               dev_err(gbe_dev->dev,
+                       "hw_stats_prev memory allocation failed\n");
+               return -ENOMEM;
+       }
+
        ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
        if (ret) {
                dev_err(gbe_dev->dev,
@@ -2765,16 +3007,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBENU_HOST_PORT_NUM;
        gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
-       gbe_dev->et_stats = gbenu_et_stats;
        gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
-       if (IS_SS_ID_NU(gbe_dev))
-               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-                       (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
-       else
-               gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
-                                       GBENU_ET_STATS_PORT_SIZE;
-
        /* Subsystem registers */
        GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
 
@@ -2804,7 +3038,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        struct cpsw_ale_params ale_params;
        struct gbe_priv *gbe_dev;
        u32 slave_num;
-       int ret = 0;
+       int i, ret = 0;
 
        if (!node) {
                dev_err(dev, "device tree info unavailable\n");
@@ -2951,6 +3185,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        /* initialize host port */
        gbe_init_host_port(gbe_dev);
 
+       spin_lock_bh(&gbe_dev->hw_stats_lock);
+       for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+               if (gbe_dev->ss_version == GBE_SS_VERSION_14)
+                       gbe_reset_mod_stats_ver14(gbe_dev, i);
+               else
+                       gbe_reset_mod_stats(gbe_dev, i);
+       }
+       spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
        init_timer(&gbe_dev->timer);
        gbe_dev->timer.data      = (unsigned long)gbe_dev;
        gbe_dev->timer.function = netcp_ethss_timer;
index dd4544085db321d2f9020d97ebbbb9a8887ed4b9..5ce7020ca53004b602df31a11954f1fb6c7e7e23 100644 (file)
@@ -541,6 +541,29 @@ union nvsp_2_message_uber {
        struct nvsp_2_free_rxbuf free_rxbuf;
 } __packed;
 
+struct nvsp_4_send_vf_association {
+       /* 1: allocated, serial number is valid. 0: not allocated */
+       u32 allocated;
+
+       /* Serial number of the VF to team with */
+       u32 serial;
+} __packed;
+
+enum nvsp_vm_datapath {
+       NVSP_DATAPATH_SYNTHETIC = 0,
+       NVSP_DATAPATH_VF,
+       NVSP_DATAPATH_MAX
+};
+
+struct nvsp_4_sw_datapath {
+       u32 active_datapath; /* active data path in VM */
+} __packed;
+
+union nvsp_4_message_uber {
+       struct nvsp_4_send_vf_association vf_assoc;
+       struct nvsp_4_sw_datapath active_dp;
+} __packed;
+
 enum nvsp_subchannel_operation {
        NVSP_SUBCHANNEL_NONE = 0,
        NVSP_SUBCHANNEL_ALLOCATE,
@@ -578,6 +601,7 @@ union nvsp_all_messages {
        union nvsp_message_init_uber init_msg;
        union nvsp_1_message_uber v1_msg;
        union nvsp_2_message_uber v2_msg;
+       union nvsp_4_message_uber v4_msg;
        union nvsp_5_message_uber v5_msg;
 } __packed;
 
@@ -589,6 +613,7 @@ struct nvsp_message {
 
 
 #define NETVSC_MTU 65536
+#define NETVSC_MTU_MIN 68
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
@@ -670,6 +695,8 @@ struct netvsc_device {
        u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
        u32 num_chn;
+       spinlock_t sc_lock; /* Protects num_sc_offered variable */
+       u32 num_sc_offered;
        atomic_t queue_sends[NR_CPUS];
 
        /* Holds rndis device info */
@@ -688,6 +715,11 @@ struct netvsc_device {
 
        /* The net device context */
        struct net_device_context *nd_ctx;
+
+       /* 1: allocated, serial number is valid. 0: not allocated */
+       u32 vf_alloc;
+       /* Serial number of the VF to team with */
+       u32 vf_serial;
 };
 
 /* NdisInitialize message */
index 23126a74f3577b4263a8136f41548c396c7d2d58..51e4c0fd0a7480c4c705a79a9cf8ec2f627862f9 100644 (file)
@@ -453,13 +453,16 @@ static int negotiate_nvsp_ver(struct hv_device *device,
        if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
                return 0;
 
-       /* NVSPv2 only: Send NDIS config */
+       /* NVSPv2 or later: Send NDIS config */
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
        init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
                                                       ETH_HLEN;
        init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
 
+       if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
+               init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;
+
        ret = vmbus_sendpacket(device->channel, init_packet,
                                sizeof(struct nvsp_message),
                                (unsigned long)init_packet,
@@ -1064,11 +1067,10 @@ static void netvsc_receive(struct netvsc_device *net_device,
 
 
 static void netvsc_send_table(struct hv_device *hdev,
-                             struct vmpacket_descriptor *vmpkt)
+                             struct nvsp_message *nvmsg)
 {
        struct netvsc_device *nvscdev;
        struct net_device *ndev;
-       struct nvsp_message *nvmsg;
        int i;
        u32 count, *tab;
 
@@ -1077,12 +1079,6 @@ static void netvsc_send_table(struct hv_device *hdev,
                return;
        ndev = nvscdev->ndev;
 
-       nvmsg = (struct nvsp_message *)((unsigned long)vmpkt +
-                                       (vmpkt->offset8 << 3));
-
-       if (nvmsg->hdr.msg_type != NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE)
-               return;
-
        count = nvmsg->msg.v5_msg.send_table.count;
        if (count != VRSS_SEND_TAB_SIZE) {
                netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1096,6 +1092,28 @@ static void netvsc_send_table(struct hv_device *hdev,
                nvscdev->send_table[i] = tab[i];
 }
 
+static void netvsc_send_vf(struct netvsc_device *nvdev,
+                          struct nvsp_message *nvmsg)
+{
+       nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
+       nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+}
+
+static inline void netvsc_receive_inband(struct hv_device *hdev,
+                                        struct netvsc_device *nvdev,
+                                        struct nvsp_message *nvmsg)
+{
+       switch (nvmsg->hdr.msg_type) {
+       case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+               netvsc_send_table(hdev, nvmsg);
+               break;
+
+       case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+               netvsc_send_vf(nvdev, nvmsg);
+               break;
+       }
+}
+
 void netvsc_channel_cb(void *context)
 {
        int ret;
@@ -1108,6 +1126,7 @@ void netvsc_channel_cb(void *context)
        unsigned char *buffer;
        int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
+       struct nvsp_message *nvmsg;
 
        if (channel->primary_channel != NULL)
                device = channel->primary_channel->device_obj;
@@ -1126,6 +1145,8 @@ void netvsc_channel_cb(void *context)
                if (ret == 0) {
                        if (bytes_recvd > 0) {
                                desc = (struct vmpacket_descriptor *)buffer;
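+                               /* offset8 counts 8-byte units from the descriptor start */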
+                               nvmsg = (struct nvsp_message *)((unsigned long)
+                                        desc + (desc->offset8 << 3));
                                switch (desc->type) {
                                case VM_PKT_COMP:
                                        netvsc_send_completion(net_device,
@@ -1138,7 +1159,9 @@ void netvsc_channel_cb(void *context)
                                        break;
 
                                case VM_PKT_DATA_INBAND:
-                                       netvsc_send_table(device, desc);
+                                       netvsc_receive_inband(device,
+                                                             net_device,
+                                                             nvmsg);
                                        break;
 
                                default:
index 358475ed9b5964c53f038c61f7fb8a3996c2a5ab..7b36d5fecc1f24b95c87477ebd43a06ec5a4f914 100644 (file)
@@ -106,7 +106,7 @@ static int netvsc_open(struct net_device *net)
                return ret;
        }
 
-       netif_tx_start_all_queues(net);
+       netif_tx_wake_all_queues(net);
 
        nvdev = hv_get_drvdata(device_obj);
        rdev = nvdev->extension;
@@ -120,15 +120,56 @@ static int netvsc_close(struct net_device *net)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
+       struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
        int ret;
+       u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
+       struct vmbus_channel *chn;
 
        netif_tx_disable(net);
 
        /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
        cancel_work_sync(&net_device_ctx->work);
        ret = rndis_filter_close(device_obj);
-       if (ret != 0)
+       if (ret != 0) {
                netdev_err(net, "unable to close device (ret %d).\n", ret);
+               return ret;
+       }
+
+       /* Ensure pending bytes in ring are read */
+       while (true) {
+               aread = 0;
+               for (i = 0; i < nvdev->num_chn; i++) {
+                       chn = nvdev->chn_table[i];
+                       if (!chn)
+                               continue;
+
+                       hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
+                                                    &awrite);
+
+                       if (aread)
+                               break;
+
+                       hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
+                                                    &awrite);
+
+                       if (aread)
+                               break;
+               }
+
+               retry++;
+               if (retry > retry_max || aread == 0)
+                       break;
+
+               msleep(msec);
+
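+               /* Double the poll interval, roughly capping it at one second */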
+               if (msec < 1000)
+                       msec *= 2;
+       }
+
+       if (aread) {
+               netdev_err(net, "Ring buffer not empty after closing rndis\n");
+               ret = -ETIMEDOUT;
+       }
 
        return ret;
 }
@@ -736,6 +777,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        struct netvsc_device *nvdev = hv_get_drvdata(hdev);
        struct netvsc_device_info device_info;
        int limit = ETH_DATA_LEN;
+       int ret = 0;
 
        if (nvdev == NULL || nvdev->destroy)
                return -ENODEV;
@@ -743,13 +785,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
                limit = NETVSC_MTU - ETH_HLEN;
 
-       /* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
-       if (mtu < ETH_DATA_LEN || mtu > limit)
+       if (mtu < NETVSC_MTU_MIN || mtu > limit)
                return -EINVAL;
 
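+       /* An MTU change requires a full close/remove/re-add/reopen cycle */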
+       ret = netvsc_close(ndev);
+       if (ret)
+               goto out;
+
        nvdev->start_remove = true;
-       cancel_work_sync(&ndevctx->work);
-       netif_tx_disable(ndev);
        rndis_filter_device_remove(hdev);
 
        ndev->mtu = mtu;
@@ -759,9 +802,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        rndis_filter_device_add(hdev, &device_info);
-       netif_tx_wake_all_queues(ndev);
 
-       return 0;
+out:
+       netvsc_open(ndev);
+
+       return ret;
 }
 
 static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
index 236aeb76ef224ba5eaf9e0994b6e9363434047ac..9b8263db49cc30c8a079cd27d0ba08aa67df34cd 100644 (file)
@@ -984,9 +984,16 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        struct netvsc_device *nvscdev;
        u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
        int ret;
+       unsigned long flags;
 
        nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
 
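+       /* Count down the expected offers; wake the waiter when the last arrives */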
+       spin_lock_irqsave(&nvscdev->sc_lock, flags);
+       nvscdev->num_sc_offered--;
+       spin_unlock_irqrestore(&nvscdev->sc_lock, flags);
+       if (nvscdev->num_sc_offered == 0)
+               complete(&nvscdev->channel_init_wait);
+
        if (chn_index >= nvscdev->num_chn)
                return;
 
@@ -1015,8 +1022,10 @@ int rndis_filter_device_add(struct hv_device *dev,
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
        u32 mtu, size;
        u32 num_rss_qs;
+       u32 sc_delta;
        const struct cpumask *node_cpu_mask;
        u32 num_possible_rss_qs;
+       unsigned long flags;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -1039,6 +1048,8 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->max_chn = 1;
        net_device->num_chn = 1;
 
+       spin_lock_init(&net_device->sc_lock);
+
        net_device->extension = rndis_device;
        rndis_device->net_dev = net_device;
 
@@ -1054,7 +1065,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        ret = rndis_filter_query_device(rndis_device,
                                        RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
                                        &mtu, &size);
-       if (ret == 0 && size == sizeof(u32))
+       if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
                net_device->ndev->mtu = mtu;
 
        /* Get the mac address */
@@ -1116,6 +1127,9 @@ int rndis_filter_device_add(struct hv_device *dev,
        num_possible_rss_qs = cpumask_weight(node_cpu_mask);
        net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
 
+       num_rss_qs = net_device->num_chn - 1;
+       net_device->num_sc_offered = num_rss_qs;
+
        if (net_device->num_chn == 1)
                goto out;
 
@@ -1157,11 +1171,25 @@ int rndis_filter_device_add(struct hv_device *dev,
 
        ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
 
+       /*
+        * Wait for the host to send us the sub-channel offers.
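+        * Fewer channels may end up in use than were first requested;
+        * discount the difference so this wait can complete.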
+        */
+       spin_lock_irqsave(&net_device->sc_lock, flags);
+       sc_delta = num_rss_qs - (net_device->num_chn - 1);
+       net_device->num_sc_offered -= sc_delta;
+       spin_unlock_irqrestore(&net_device->sc_lock, flags);
+
+       while (net_device->num_sc_offered != 0) {
+               t = wait_for_completion_timeout(&net_device->channel_init_wait,
+                                               10 * HZ);
+               if (t == 0)
+                       WARN(1, "Netvsc: timed out waiting for sub-channel offers");
+       }
 out:
        if (ret) {
                net_device->max_chn = 1;
                net_device->num_chn = 1;
        }
+
        return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
index f7bd9f3ddaac8c2044e2ea3215e02b98fdc3587d..d0d5bf6cbb686a357e4e5ed98ad652c0dbf6908a 100644 (file)
@@ -545,7 +545,9 @@ at86rf230_async_state_delay(void *context)
        }
 
        /* Default delay is 1us in most cases */
-       tim = ktime_set(0, NSEC_PER_USEC);
+       udelay(1);
+       at86rf230_async_state_timer(&ctx->timer);
+       return;
 
 change:
        hrtimer_start(&ctx->timer, tim, HRTIMER_MODE_REL);
index b6fc295796679554fc8a9d499eb349dc1bdd662f..613dae559925f947586f8f011aad315d0705866c 100644 (file)
@@ -1151,7 +1151,6 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
 static struct spi_driver cc2520_driver = {
        .driver = {
                .name = "cc2520",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(cc2520_of_ids),
        },
index 2549760e039fd803fc747f501d74c336fac71b58..997724b8e4343b3ab068a86463393e8008f6ee91 100644 (file)
@@ -812,7 +812,6 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
 static struct spi_driver mrf24j40_driver = {
        .driver = {
                .name = "mrf24j40",
-               .bus = &spi_bus_type,
                .owner = THIS_MODULE,
        },
        .id_table = mrf24j40_ids,
index 94570aace2414a5984d989045c599ed141582eb5..cc56fac3c3f83ef51e4f42f67f9e1c4e549f7725 100644 (file)
 #include <net/net_namespace.h>
 
 #define TX_Q_LIMIT    32
-struct ifb_private {
+struct ifb_q_private {
+       struct net_device       *dev;
        struct tasklet_struct   ifb_tasklet;
-       int     tasklet_pending;
-
-       struct u64_stats_sync   rsync;
+       int                     tasklet_pending;
+       int                     txqnum;
        struct sk_buff_head     rq;
-       u64 rx_packets;
-       u64 rx_bytes;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       struct u64_stats_sync   rsync;
 
        struct u64_stats_sync   tsync;
+       u64                     tx_packets;
+       u64                     tx_bytes;
        struct sk_buff_head     tq;
-       u64 tx_packets;
-       u64 tx_bytes;
-};
+} ____cacheline_aligned_in_smp;
 
-static int numifbs = 2;
+struct ifb_dev_private {
+       struct ifb_q_private *tx_private;
+};
 
-static void ri_tasklet(unsigned long dev);
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
 static int ifb_open(struct net_device *dev);
 static int ifb_close(struct net_device *dev);
 
-static void ri_tasklet(unsigned long dev)
+static void ifb_ri_tasklet(unsigned long _txp)
 {
-       struct net_device *_dev = (struct net_device *)dev;
-       struct ifb_private *dp = netdev_priv(_dev);
+       struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
        struct netdev_queue *txq;
        struct sk_buff *skb;
 
-       txq = netdev_get_tx_queue(_dev, 0);
-       if ((skb = skb_peek(&dp->tq)) == NULL) {
-               if (__netif_tx_trylock(txq)) {
-                       skb_queue_splice_tail_init(&dp->rq, &dp->tq);
-                       __netif_tx_unlock(txq);
-               } else {
-                       /* reschedule */
+       txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
+       skb = skb_peek(&txp->tq);
+       if (!skb) {
+               if (!__netif_tx_trylock(txq))
                        goto resched;
-               }
+               skb_queue_splice_tail_init(&txp->rq, &txp->tq);
+               __netif_tx_unlock(txq);
        }
 
-       while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
+       while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
                u32 from = G_TC_FROM(skb->tc_verd);
 
                skb->tc_verd = 0;
                skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
 
-               u64_stats_update_begin(&dp->tsync);
-               dp->tx_packets++;
-               dp->tx_bytes += skb->len;
-               u64_stats_update_end(&dp->tsync);
+               u64_stats_update_begin(&txp->tsync);
+               txp->tx_packets++;
+               txp->tx_bytes += skb->len;
+               u64_stats_update_end(&txp->tsync);
 
                rcu_read_lock();
-               skb->dev = dev_get_by_index_rcu(dev_net(_dev), skb->skb_iif);
+               skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
                if (!skb->dev) {
                        rcu_read_unlock();
                        dev_kfree_skb(skb);
-                       _dev->stats.tx_dropped++;
-                       if (skb_queue_len(&dp->tq) != 0)
+                       txp->dev->stats.tx_dropped++;
+                       if (skb_queue_len(&txp->tq) != 0)
                                goto resched;
                        break;
                }
                rcu_read_unlock();
-               skb->skb_iif = _dev->ifindex;
+               skb->skb_iif = txp->dev->ifindex;
 
                if (from & AT_EGRESS) {
                        dev_queue_xmit(skb);
@@ -112,10 +111,11 @@ static void ri_tasklet(unsigned long dev)
        }
 
        if (__netif_tx_trylock(txq)) {
-               if ((skb = skb_peek(&dp->rq)) == NULL) {
-                       dp->tasklet_pending = 0;
-                       if (netif_queue_stopped(_dev))
-                               netif_wake_queue(_dev);
+               skb = skb_peek(&txp->rq);
+               if (!skb) {
+                       txp->tasklet_pending = 0;
+                       if (netif_tx_queue_stopped(txq))
+                               netif_tx_wake_queue(txq);
                } else {
                        __netif_tx_unlock(txq);
                        goto resched;
@@ -123,8 +123,8 @@ static void ri_tasklet(unsigned long dev)
                __netif_tx_unlock(txq);
        } else {
 resched:
-               dp->tasklet_pending = 1;
-               tasklet_schedule(&dp->ifb_tasklet);
+               txp->tasklet_pending = 1;
+               tasklet_schedule(&txp->ifb_tasklet);
        }
 
 }
@@ -132,29 +132,58 @@ resched:
 static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
 {
-       struct ifb_private *dp = netdev_priv(dev);
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp = dp->tx_private;
        unsigned int start;
-
-       do {
-               start = u64_stats_fetch_begin_irq(&dp->rsync);
-               stats->rx_packets = dp->rx_packets;
-               stats->rx_bytes = dp->rx_bytes;
-       } while (u64_stats_fetch_retry_irq(&dp->rsync, start));
-
-       do {
-               start = u64_stats_fetch_begin_irq(&dp->tsync);
-
-               stats->tx_packets = dp->tx_packets;
-               stats->tx_bytes = dp->tx_bytes;
-
-       } while (u64_stats_fetch_retry_irq(&dp->tsync, start));
-
+       u64 packets, bytes;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               do {
+                       start = u64_stats_fetch_begin_irq(&txp->rsync);
+                       packets = txp->rx_packets;
+                       bytes = txp->rx_bytes;
+               } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
+               stats->rx_packets += packets;
+               stats->rx_bytes += bytes;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&txp->tsync);
+                       packets = txp->tx_packets;
+                       bytes = txp->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
+               stats->tx_packets += packets;
+               stats->tx_bytes += bytes;
+       }
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
 
        return stats;
 }
 
+static int ifb_dev_init(struct net_device *dev)
+{
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp;
+       int i;
+
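+       /* One ifb_q_private per TX queue, each with its own tasklet and queues */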
+       txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
+       if (!txp)
+               return -ENOMEM;
+       dp->tx_private = txp;
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               txp->txqnum = i;
+               txp->dev = dev;
+               __skb_queue_head_init(&txp->rq);
+               __skb_queue_head_init(&txp->tq);
+               u64_stats_init(&txp->rsync);
+               u64_stats_init(&txp->tsync);
+               tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
+                            (unsigned long)txp);
+               netif_tx_start_queue(netdev_get_tx_queue(dev, i));
+       }
+       return 0;
+}
 
 static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open       = ifb_open,
@@ -162,6 +191,7 @@ static const struct net_device_ops ifb_netdev_ops = {
        .ndo_get_stats64 = ifb_stats64,
        .ndo_start_xmit = ifb_xmit,
        .ndo_validate_addr = eth_validate_addr,
+       .ndo_init       = ifb_dev_init,
 };
 
 #define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG  | NETIF_F_FRAGLIST | \
@@ -169,10 +199,24 @@ static const struct net_device_ops ifb_netdev_ops = {
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX         | \
                      NETIF_F_HW_VLAN_STAG_TX)
 
+static void ifb_dev_free(struct net_device *dev)
+{
+       struct ifb_dev_private *dp = netdev_priv(dev);
+       struct ifb_q_private *txp = dp->tx_private;
+       int i;
+
+       for (i = 0; i < dev->num_tx_queues; i++, txp++) {
+               tasklet_kill(&txp->ifb_tasklet);
+               __skb_queue_purge(&txp->rq);
+               __skb_queue_purge(&txp->tq);
+       }
+       kfree(dp->tx_private);
+       free_netdev(dev);
+}
+
 static void ifb_setup(struct net_device *dev)
 {
        /* Initialize the device structure. */
-       dev->destructor = free_netdev;
        dev->netdev_ops = &ifb_netdev_ops;
 
        /* Fill in device structure with ethernet-generic values. */
@@ -188,17 +232,19 @@ static void ifb_setup(struct net_device *dev)
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
+       dev->destructor = ifb_dev_free;
 }
 
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
+       struct ifb_dev_private *dp = netdev_priv(dev);
        u32 from = G_TC_FROM(skb->tc_verd);
+       struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
-       u64_stats_update_begin(&dp->rsync);
-       dp->rx_packets++;
-       dp->rx_bytes += skb->len;
-       u64_stats_update_end(&dp->rsync);
+       u64_stats_update_begin(&txp->rsync);
+       txp->rx_packets++;
+       txp->rx_bytes += skb->len;
+       u64_stats_update_end(&txp->rsync);
 
        if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
                dev_kfree_skb(skb);
@@ -206,14 +252,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
-               netif_stop_queue(dev);
-       }
+       if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
+               netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
 
-       __skb_queue_tail(&dp->rq, skb);
-       if (!dp->tasklet_pending) {
-               dp->tasklet_pending = 1;
-               tasklet_schedule(&dp->ifb_tasklet);
+       __skb_queue_tail(&txp->rq, skb);
+       if (!txp->tasklet_pending) {
+               txp->tasklet_pending = 1;
+               tasklet_schedule(&txp->ifb_tasklet);
        }
 
        return NETDEV_TX_OK;
@@ -221,24 +266,13 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int ifb_close(struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
-
-       tasklet_kill(&dp->ifb_tasklet);
-       netif_stop_queue(dev);
-       __skb_queue_purge(&dp->rq);
-       __skb_queue_purge(&dp->tq);
+       netif_tx_stop_all_queues(dev);
        return 0;
 }
 
 static int ifb_open(struct net_device *dev)
 {
-       struct ifb_private *dp = netdev_priv(dev);
-
-       tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-       __skb_queue_head_init(&dp->rq);
-       __skb_queue_head_init(&dp->tq);
-       netif_start_queue(dev);
-
+       netif_tx_start_all_queues(dev);
        return 0;
 }
 
@@ -255,31 +289,30 @@ static int ifb_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind           = "ifb",
-       .priv_size      = sizeof(struct ifb_private),
+       .priv_size      = sizeof(struct ifb_dev_private),
        .setup          = ifb_setup,
        .validate       = ifb_validate,
 };
 
-/* Number of ifb devices to be set up by this module. */
+/* Number of ifb devices to be set up by this module.
+ * Note that these legacy devices have one queue.
+ * Prefer something like: ip link add ifb10 numtxqueues 8 type ifb
+ */
+static int numifbs = 2;
 module_param(numifbs, int, 0);
 MODULE_PARM_DESC(numifbs, "Number of ifb devices");
 
 static int __init ifb_init_one(int index)
 {
        struct net_device *dev_ifb;
-       struct ifb_private *dp;
        int err;
 
-       dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+       dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
                               NET_NAME_UNKNOWN, ifb_setup);
 
        if (!dev_ifb)
                return -ENOMEM;
 
-       dp = netdev_priv(dev_ifb);
-       u64_stats_init(&dp->rsync);
-       u64_stats_init(&dp->tsync);
-
        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
index cb86d7a0154228f5c3711898072a0e5f5d4d6a81..c07030dbe7484b50e1f49c9dff6a35ba6cf0fc95 100644 (file)
@@ -14,6 +14,11 @@ if PHYLIB
 
 comment "MII PHY device drivers"
 
+config AQUANTIA_PHY
+        tristate "Drivers for the Aquantia PHYs"
+        ---help---
+          Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405
+
 config AT803X_PHY
        tristate "Drivers for Atheros AT803X PHYs"
        ---help---
@@ -54,6 +59,11 @@ config VITESSE_PHY
         ---help---
           Currently supports the vsc8244
 
+config TERANETICS_PHY
+        tristate "Drivers for the Teranetics PHYs"
+        ---help---
+          Currently supports the Teranetics TN2020
+
 config SMSC_PHY
        tristate "Drivers for SMSC PHYs"
        ---help---
@@ -145,13 +155,13 @@ config MDIO_GPIO
          will be called mdio-gpio.
 
 config MDIO_OCTEON
-       tristate "Support for MDIO buses on Octeon SOCs"
-       depends on CAVIUM_OCTEON_SOC
-       default y
+       tristate "Support for MDIO buses on Octeon and ThunderX SoCs"
+       depends on 64BIT
        help
 
-         This module provides a driver for the Octeon MDIO busses.
-         It is required by the Octeon Ethernet device drivers.
+         This module provides a driver for the Octeon and ThunderX MDIO
+         buses. It is required by the Octeon and ThunderX Ethernet device
+         drivers.
 
          If in doubt, say Y.
 
index fcc25a0c45cd01de449f677bb424cbe158a416c7..9bb103358c74d2c87054c08f0c35d1d83609a43a 100644 (file)
@@ -3,12 +3,14 @@
 libphy-objs                    := phy.o phy_device.o mdio_bus.o
 
 obj-$(CONFIG_PHYLIB)           += libphy.o
+obj-$(CONFIG_AQUANTIA_PHY)     += aquantia.o
 obj-$(CONFIG_MARVELL_PHY)      += marvell.o
 obj-$(CONFIG_DAVICOM_PHY)      += davicom.o
 obj-$(CONFIG_CICADA_PHY)       += cicada.o
 obj-$(CONFIG_LXT_PHY)          += lxt.o
 obj-$(CONFIG_QSEMI_PHY)                += qsemi.o
 obj-$(CONFIG_SMSC_PHY)         += smsc.o
+obj-$(CONFIG_TERANETICS_PHY)   += teranetics.o
 obj-$(CONFIG_VITESSE_PHY)      += vitesse.o
 obj-$(CONFIG_BROADCOM_PHY)     += broadcom.o
 obj-$(CONFIG_BCM63XX_PHY)      += bcm63xx.o
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
new file mode 100644 (file)
index 0000000..73d347d
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * Driver for Aquantia PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+
+#define PHY_ID_AQ1202  0x03a1b445
+#define PHY_ID_AQ2104  0x03a1b460
+#define PHY_ID_AQR105  0x03a1b4a2
+#define PHY_ID_AQR405  0x03a1b4b0
+
+#define PHY_AQUANTIA_FEATURES  (SUPPORTED_10000baseT_Full | \
+                                SUPPORTED_1000baseT_Full | \
+                                SUPPORTED_100baseT_Full | \
+                                PHY_DEFAULT_FEATURES)
+
+static int aquantia_config_aneg(struct phy_device *phydev)
+{
+       phydev->supported = PHY_AQUANTIA_FEATURES;
+       phydev->advertising = phydev->supported;
+
+       return 0;
+}
+
+static int aquantia_aneg_done(struct phy_device *phydev)
+{
+       int reg;
+
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+}
+
+static int aquantia_read_status(struct phy_device *phydev)
+{
+       int reg;
+
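+       /* Link status in MDIO_STAT1 is latched low; read twice so the
+        * second read reflects the current state.
+        */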
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+       if (reg & MDIO_STAT1_LSTATUS)
+               phydev->link = 1;
+       else
+               phydev->link = 0;
+
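+       /* Vendor speed-status register: read twice with a settle delay */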
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+       mdelay(10);
+       reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800);
+
+       switch (reg) {
+       case 0x9:
+               phydev->speed = SPEED_2500;
+               break;
+       case 0x5:
+               phydev->speed = SPEED_1000;
+               break;
+       case 0x3:
+               phydev->speed = SPEED_100;
+               break;
+       case 0x7:
+       default:
+               phydev->speed = SPEED_10000;
+               break;
+       }
+       phydev->duplex = DUPLEX_FULL;
+
+       return 0;
+}
+
+static struct phy_driver aquantia_driver[] = {
+{
+       .phy_id         = PHY_ID_AQ1202,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQ1202",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQ2104,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQ2104",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQR105,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQR105",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+{
+       .phy_id         = PHY_ID_AQR405,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Aquantia AQR405",
+       .features       = PHY_AQUANTIA_FEATURES,
+       .aneg_done      = aquantia_aneg_done,
+       .config_aneg    = aquantia_config_aneg,
+       .read_status    = aquantia_read_status,
+       .driver         = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init aquantia_init(void)
+{
+       return phy_drivers_register(aquantia_driver,
+                                   ARRAY_SIZE(aquantia_driver));
+}
+
+static void __exit aquantia_exit(void)
+{
+       return phy_drivers_unregister(aquantia_driver,
+                                     ARRAY_SIZE(aquantia_driver));
+}
+
+module_init(aquantia_init);
+module_exit(aquantia_exit);
+
+static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
+       { PHY_ID_AQ1202, 0xfffffff0 },
+       { PHY_ID_AQ2104, 0xfffffff0 },
+       { PHY_ID_AQR105, 0xfffffff0 },
+       { PHY_ID_AQR405, 0xfffffff0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, aquantia_tbl);
+
+MODULE_DESCRIPTION("Aquantia PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
index 00cb41e713123689803e5dddfa527c3ebaee26ae..185b03c08e16ce9140b2b0d91abdfacd5b9258a1 100644 (file)
@@ -1449,17 +1449,9 @@ static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
        info->rx_filters =
                (1 << HWTSTAMP_FILTER_NONE) |
                (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
-               (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+               (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
        return 0;
 }
 
index 8a3bf546989212734aa9a5a5a5b58948ecf264da..32f10662f4ac7c189e3fa666f0891f515d2a4ca1 100644 (file)
@@ -123,12 +123,8 @@ static int dp83867_of_init(struct phy_device *phydev)
        if (ret)
                return ret;
 
-       ret = of_property_read_u32(of_node, "ti,fifo-depth",
+       return of_property_read_u32(of_node, "ti,fifo-depth",
                                   &dp83867->fifo_depth);
-       if (ret)
-               return ret;
-
-       return 0;
 }
 #else
 static int dp83867_of_init(struct phy_device *phydev)
index 1960b46add65b3b89f122cc401c872050cebdbe4..479b93f9581c4088e784340220a4b61837e9dbde 100644 (file)
@@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
        u16 lpagb = 0;
        u16 lpa = 0;
 
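+       /* Link down: leave BMCR/BMSR/LPA cleared and skip to the register update */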
+       if (!fp->status.link)
+               goto done;
+       bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+
        if (fp->status.duplex) {
                bmcr |= BMCR_FULLDPLX;
 
@@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
                }
        }
 
-       if (fp->status.link)
-               bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
-
        if (fp->status.pause)
                lpa |= LPA_PAUSE_CAP;
 
        if (fp->status.asym_pause)
                lpa |= LPA_PAUSE_ASYM;
 
+done:
        fp->regs[MII_PHYSID1] = 0;
        fp->regs[MII_PHYSID2] = 0;
 
index f721444c2b0a9413dd0bac8c7e61099cf6d3789b..3320a179ee360c6b3e8d90b6355f5cd18c8b4630 100644 (file)
@@ -48,6 +48,8 @@
 #define MII_M1011_IMASK_CLEAR          0x0000
 
 #define MII_M1011_PHY_SCR              0x10
+#define MII_M1011_PHY_SCR_MDI          0x0000
+#define MII_M1011_PHY_SCR_MDI_X                0x0020
 #define MII_M1011_PHY_SCR_AUTO_CROSS   0x0060
 
 #define MII_M1145_PHY_EXT_SR           0x1b
@@ -159,6 +161,43 @@ static int marvell_config_intr(struct phy_device *phydev)
        return err;
 }
 
+static int marvell_set_polarity(struct phy_device *phydev, int polarity)
+{
+       int reg;
+       int err;
+       int val;
+
+       /* get the current settings */
+       reg = phy_read(phydev, MII_M1011_PHY_SCR);
+       if (reg < 0)
+               return reg;
+
+       val = reg;
+       val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
+       switch (polarity) {
+       case ETH_TP_MDI:
+               val |= MII_M1011_PHY_SCR_MDI;
+               break;
+       case ETH_TP_MDI_X:
+               val |= MII_M1011_PHY_SCR_MDI_X;
+               break;
+       case ETH_TP_MDI_AUTO:
+       case ETH_TP_MDI_INVALID:
+       default:
+               val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+               break;
+       }
+
+       if (val != reg) {
+               /* Set the new polarity value in the register */
+               err = phy_write(phydev, MII_M1011_PHY_SCR, val);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int marvell_config_aneg(struct phy_device *phydev)
 {
        int err;
@@ -191,8 +230,7 @@ static int marvell_config_aneg(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       err = phy_write(phydev, MII_M1011_PHY_SCR,
-                       MII_M1011_PHY_SCR_AUTO_CROSS);
+       err = marvell_set_polarity(phydev, phydev->mdix);
        if (err < 0)
                return err;
 
index c838ad6155f7863cbed177f35c4416fad1c0ddb5..fcf4e4df7cc867c25cdb75a912a41d1a1bf58560 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/platform_device.h>
+#include <linux/of_address.h>
 #include <linux/of_mdio.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/phy.h>
 #include <linux/io.h>
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-smix-defs.h>
+#endif
 
-#define DRV_VERSION "1.0"
-#define DRV_DESCRIPTION "Cavium Networks Octeon SMI/MDIO driver"
+#define DRV_VERSION "1.1"
+#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver"
 
 #define SMI_CMD                0x0
 #define SMI_WR_DAT     0x8
 #define SMI_CLK                0x18
 #define SMI_EN         0x20
 
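+/* Emit struct bitfields MSB-first on big-endian and in reverse on
+ * little-endian, so the register layouts below match the hardware.
+ */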
+#ifdef __BIG_ENDIAN_BITFIELD
+#define OCT_MDIO_BITFIELD_FIELD(field, more)   \
+       field;                                  \
+       more
+
+#else
+#define OCT_MDIO_BITFIELD_FIELD(field, more)   \
+       more                                    \
+       field;
+
+#endif
+
+union cvmx_smix_clk {
+       u64 u64;
+       struct cvmx_smix_clk_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39,
+         OCT_MDIO_BITFIELD_FIELD(u64 mode:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 preamble:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 sample:4,
+         OCT_MDIO_BITFIELD_FIELD(u64 phase:8,
+         ;))))))))))
+       } s;
+};
+
+union cvmx_smix_cmd {
+       u64 u64;
+       struct cvmx_smix_cmd_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5,
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3,
+         OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5,
+         ;))))))
+       } s;
+};
+
+union cvmx_smix_en {
+       u64 u64;
+       struct cvmx_smix_en_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63,
+         OCT_MDIO_BITFIELD_FIELD(u64 en:1,
+         ;))
+       } s;
+};
+
+union cvmx_smix_rd_dat {
+       u64 u64;
+       struct cvmx_smix_rd_dat_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+         ;))))
+       } s;
+};
+
+union cvmx_smix_wr_dat {
+       u64 u64;
+       struct cvmx_smix_wr_dat_s {
+         OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46,
+         OCT_MDIO_BITFIELD_FIELD(u64 pending:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 val:1,
+         OCT_MDIO_BITFIELD_FIELD(u64 dat:16,
+         ;))))
+       } s;
+};
+
 enum octeon_mdiobus_mode {
        UNINIT = 0,
        C22,
@@ -41,6 +116,21 @@ struct octeon_mdiobus {
        int phy_irq[PHY_MAX_ADDR];
 };
 
+#ifdef CONFIG_CAVIUM_OCTEON_SOC
+static void oct_mdio_writeq(u64 val, u64 addr)
+{
+       cvmx_write_csr(addr, val);
+}
+
+static u64 oct_mdio_readq(u64 addr)
+{
+       return cvmx_read_csr(addr);
+}
+#else
+#define oct_mdio_writeq(val, addr)     writeq_relaxed(val, (void *)addr)
+#define oct_mdio_readq(addr)           readq_relaxed((void *)addr)
+#endif
+
 static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
                                    enum octeon_mdiobus_mode m)
 {
@@ -49,10 +139,10 @@ static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
        if (m == p->mode)
                return;
 
-       smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
+       smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK);
        smi_clk.s.mode = (m == C45) ? 1 : 0;
        smi_clk.s.preamble = 1;
-       cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
+       oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK);
        p->mode = m;
 }
 
@@ -67,7 +157,7 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
 
        smi_wr.u64 = 0;
        smi_wr.s.dat = regnum & 0xffff;
-       cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+       oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
        regnum = (regnum >> 16) & 0x1f;
 
@@ -75,14 +165,14 @@ static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
        smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+               smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
        } while (smi_wr.s.pending && --timeout);
 
        if (timeout <= 0)
@@ -114,14 +204,14 @@ static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
        smi_cmd.s.phy_op = op;
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_rd.u64 = cvmx_read_csr(p->register_base + SMI_RD_DAT);
+               smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
        } while (smi_rd.s.pending && --timeout);
 
        if (smi_rd.s.val)
@@ -153,20 +243,20 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
 
        smi_wr.u64 = 0;
        smi_wr.s.dat = val;
-       cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
+       oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
 
        smi_cmd.u64 = 0;
        smi_cmd.s.phy_op = op;
        smi_cmd.s.phy_adr = phy_id;
        smi_cmd.s.reg_adr = regnum;
-       cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
+       oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
 
        do {
                /* Wait 1000 clocks so we don't saturate the RSL bus
                 * doing reads.
                 */
                __delay(1000);
-               smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
+               smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
        } while (smi_wr.s.pending && --timeout);
 
        if (timeout <= 0)
@@ -187,30 +277,34 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
        if (res_mem == NULL) {
                dev_err(&pdev->dev, "found no memory resource\n");
-               err = -ENXIO;
-               goto fail;
+               return -ENXIO;
        }
+
        bus->mdio_phys = res_mem->start;
        bus->regsize = resource_size(res_mem);
+
        if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize,
                                     res_mem->name)) {
                dev_err(&pdev->dev, "request_mem_region failed\n");
-               goto fail;
+               return -ENXIO;
        }
+
        bus->register_base =
                (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize);
+       if (!bus->register_base) {
+               dev_err(&pdev->dev, "devm_ioremap failed\n");
+               return -ENOMEM;
+       }
 
        bus->mii_bus = mdiobus_alloc();
-
        if (!bus->mii_bus)
                goto fail;
 
        smi_en.u64 = 0;
        smi_en.s.en = 1;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
        bus->mii_bus->priv = bus;
        bus->mii_bus->irq = bus->phy_irq;
@@ -234,7 +328,7 @@ fail_register:
        mdiobus_free(bus->mii_bus);
 fail:
        smi_en.u64 = 0;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return err;
 }
 
@@ -248,7 +342,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
        mdiobus_unregister(bus->mii_bus);
        mdiobus_free(bus->mii_bus);
        smi_en.u64 = 0;
-       cvmx_write_csr(bus->register_base + SMI_EN, smi_en.u64);
+       oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
        return 0;
 }
 
index b2197b506acbe86f3540d5ae1d8334129c2bbe57..84b1fba58ac3c8efcbbb0bf9311b442ac52614c1 100644 (file)
@@ -353,6 +353,8 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
 
        phydev->duplex = cmd->duplex;
 
+       phydev->mdix = cmd->eth_tp_mdix_ctrl;
+
        /* Restart the PHY */
        phy_start_aneg(phydev);
 
@@ -377,6 +379,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
        cmd->transceiver = phy_is_internal(phydev) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = phydev->autoneg;
+       cmd->eth_tp_mdix_ctrl = phydev->mdix;
 
        return 0;
 }
index 46530159256b3c8c09ad528caf38a5a7cfdb8295..f091d691cf6f1d1961f8e92d6107abdc7e9de9b0 100644 (file)
@@ -209,8 +209,6 @@ static int ks8995_reset(struct ks8995_switch *ks)
        return ks8995_start(ks);
 }
 
-/* ------------------------------------------------------------------------ */
-
 static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
        struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -220,19 +218,9 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
        dev = container_of(kobj, struct device, kobj);
        ks8995 = dev_get_drvdata(dev);
 
-       if (unlikely(off > ks8995->regs_attr.size))
-               return 0;
-
-       if ((off + count) > ks8995->regs_attr.size)
-               count = ks8995->regs_attr.size - off;
-
-       if (unlikely(!count))
-               return count;
-
        return ks8995_read(ks8995, buf, off, count);
 }
 
-
 static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
        struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
 {
@@ -242,19 +230,9 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
        dev = container_of(kobj, struct device, kobj);
        ks8995 = dev_get_drvdata(dev);
 
-       if (unlikely(off >= ks8995->regs_attr.size))
-               return -EFBIG;
-
-       if ((off + count) > ks8995->regs_attr.size)
-               count = ks8995->regs_attr.size - off;
-
-       if (unlikely(!count))
-               return count;
-
        return ks8995_write(ks8995, buf, off, count);
 }
 
-
 static const struct bin_attribute ks8995_registers_attr = {
        .attr = {
                .name   = "registers",
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
new file mode 100644 (file)
index 0000000..91e1bec
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Driver for Teranetics PHY
+ *
+ * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+
+MODULE_DESCRIPTION("Teranetics PHY driver");
+MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
+MODULE_LICENSE("GPL v2");
+
+#define PHY_ID_TN2020  0x00a19410
+#define MDIO_PHYXS_LNSTAT_SYNC0        0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1        0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2        0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3        0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+#define MDIO_PHYXS_LANE_READY  (MDIO_PHYXS_LNSTAT_SYNC0 | \
+                               MDIO_PHYXS_LNSTAT_SYNC1 | \
+                               MDIO_PHYXS_LNSTAT_SYNC2 | \
+                               MDIO_PHYXS_LNSTAT_SYNC3 | \
+                               MDIO_PHYXS_LNSTAT_ALIGN)
+
+static int teranetics_config_init(struct phy_device *phydev)
+{
+       phydev->supported = SUPPORTED_10000baseT_Full;
+       phydev->advertising = SUPPORTED_10000baseT_Full;
+
+       return 0;
+}
+
+static int teranetics_soft_reset(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static int teranetics_aneg_done(struct phy_device *phydev)
+{
+       int reg;
+
+       /* Autonegotiation state can only be checked on the copper port;
+        * when the fiber port is in use, just report it as complete.
+        */
+       if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+               reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+               return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
+       }
+
+       return 1;
+}
+
+static int teranetics_config_aneg(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static int teranetics_read_status(struct phy_device *phydev)
+{
+       int reg;
+
+       phydev->link = 1;
+
+       phydev->speed = SPEED_10000;
+       phydev->duplex = DUPLEX_FULL;
+
+       if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
+               reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT);
+               if (reg < 0 ||
+                   (reg & MDIO_PHYXS_LANE_READY) != MDIO_PHYXS_LANE_READY) {
+                       phydev->link = 0;
+                       return 0;
+               }
+
+               reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+               if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS))
+                       phydev->link = 0;
+       }
+
+       return 0;
+}
+
+static int teranetics_match_phy_device(struct phy_device *phydev)
+{
+       return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020;
+}
+
+static struct phy_driver teranetics_driver[] = {
+{
+       .phy_id         = PHY_ID_TN2020,
+       .phy_id_mask    = 0xffffffff,
+       .name           = "Teranetics TN2020",
+       .soft_reset     = teranetics_soft_reset,
+       .aneg_done      = teranetics_aneg_done,
+       .config_init    = teranetics_config_init,
+       .config_aneg    = teranetics_config_aneg,
+       .read_status    = teranetics_read_status,
+       .match_phy_device = teranetics_match_phy_device,
+       .driver         = { .owner = THIS_MODULE,},
+},
+};
+
+static int __init teranetics_init(void)
+{
+       return phy_drivers_register(teranetics_driver,
+                                   ARRAY_SIZE(teranetics_driver));
+}
+
+static void __exit teranetics_exit(void)
+{
+       return phy_drivers_unregister(teranetics_driver,
+                                     ARRAY_SIZE(teranetics_driver));
+}
+
+module_init(teranetics_init);
+module_exit(teranetics_exit);
+
+static struct mdio_device_id __maybe_unused teranetics_tbl[] = {
+       { PHY_ID_TN2020, 0xffffffff },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, teranetics_tbl);
index 7ba8d0885f120156c47f44884212a2fd73f604b9..1610b79ae3866725a12f9af8a2ed83255999217a 100644 (file)
@@ -106,6 +106,16 @@ config USB_RTL8152
          To compile this driver as a module, choose M here: the
          module will be called r8152.
 
+config USB_LAN78XX
+       tristate "Microchip LAN78XX Based USB Ethernet Adapters"
+       select MII
+       help
+         This option adds support for Microchip LAN78XX-based USB 2.0
+         and USB 3.0 10/100/1000 Ethernet adapters.
+
+         To compile this driver as a module, choose M here: the
+         module will be called lan78xx.
+
 config USB_USBNET
        tristate "Multi-purpose USB Networking Framework"
        select MII
index e2797f1e1b31ee51f82c11d50b23e6bd274d29ab..cf6a0e610a7fcd8665ec93324997ed1db0486f69 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS)       += pegasus.o
 obj-$(CONFIG_USB_RTL8150)      += rtl8150.o
 obj-$(CONFIG_USB_RTL8152)      += r8152.o
 obj-$(CONFIG_USB_HSO)          += hso.o
+obj-$(CONFIG_USB_LAN78XX)      += lan78xx.o
 obj-$(CONFIG_USB_NET_AX8817X)  += asix.o
 asix-y := asix_devices.o asix_common.o ax88172a.o
 obj-$(CONFIG_USB_NET_AX88179_178A)      += ax88179_178a.o
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
new file mode 100644 (file)
index 0000000..ec8bd34
--- /dev/null
@@ -0,0 +1,3530 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/mdio.h>
+#include <net/ip6_checksum.h>
+#include "lan78xx.h"
+
+#define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC    "LAN78XX USB 3.0 Gigabit Ethernet Devices"
+#define DRIVER_NAME    "lan78xx"
+#define DRIVER_VERSION "1.0.0"
+
+#define TX_TIMEOUT_JIFFIES             (5 * HZ)
+#define THROTTLE_JIFFIES               (HZ / 8)
+#define UNLINK_TIMEOUT_MS              3
+
+#define RX_MAX_QUEUE_MEMORY            (60 * 1518)
+
+#define SS_USB_PKT_SIZE                        (1024)
+#define HS_USB_PKT_SIZE                        (512)
+#define FS_USB_PKT_SIZE                        (64)
+
+#define MAX_RX_FIFO_SIZE               (12 * 1024)
+#define MAX_TX_FIFO_SIZE               (12 * 1024)
+#define DEFAULT_BURST_CAP_SIZE         (MAX_TX_FIFO_SIZE)
+#define DEFAULT_BULK_IN_DELAY          (0x0800)
+#define MAX_SINGLE_PACKET_SIZE         (9000)
+#define DEFAULT_TX_CSUM_ENABLE         (true)
+#define DEFAULT_RX_CSUM_ENABLE         (true)
+#define DEFAULT_TSO_CSUM_ENABLE                (true)
+#define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define INTERNAL_PHY_ID                        (2)     /* 2: GMII */
+#define TX_OVERHEAD                    (8)
+#define RXW_PADDING                    2
+
+#define LAN78XX_USB_VENDOR_ID          (0x0424)
+#define LAN7800_USB_PRODUCT_ID         (0x7800)
+#define LAN7850_USB_PRODUCT_ID         (0x7850)
+#define LAN78XX_EEPROM_MAGIC           (0x78A5)
+#define LAN78XX_OTP_MAGIC              (0x78F3)
+
+#define        MII_READ                        1
+#define        MII_WRITE                       0
+
+#define EEPROM_INDICATOR               (0xA5)
+#define EEPROM_MAC_OFFSET              (0x01)
+#define MAX_EEPROM_SIZE                        512
+#define OTP_INDICATOR_1                        (0xF3)
+#define OTP_INDICATOR_2                        (0xF7)
+
+#define WAKE_ALL                       (WAKE_PHY | WAKE_UCAST | \
+                                        WAKE_MCAST | WAKE_BCAST | \
+                                        WAKE_ARP | WAKE_MAGIC)
+
+/* USB related defines */
+#define BULK_IN_PIPE                   1
+#define BULK_OUT_PIPE                  2
+
+/* default autosuspend delay (mSec)*/
+#define DEFAULT_AUTOSUSPEND_DELAY      (10 * 1000)
+
+static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
+       "RX FCS Errors",
+       "RX Alignment Errors",
+       "RX Fragment Errors",
+       "RX Jabber Errors",
+       "RX Undersize Frame Errors",
+       "RX Oversize Frame Errors",
+       "RX Dropped Frames",
+       "RX Unicast Byte Count",
+       "RX Broadcast Byte Count",
+       "RX Multicast Byte Count",
+       "RX Unicast Frames",
+       "RX Broadcast Frames",
+       "RX Multicast Frames",
+       "RX Pause Frames",
+       "RX 64 Byte Frames",
+       "RX 65 - 127 Byte Frames",
+       "RX 128 - 255 Byte Frames",
+       "RX 256 - 511 Byte Frames",
+       "RX 512 - 1023 Byte Frames",
+       "RX 1024 - 1518 Byte Frames",
+       "RX Greater 1518 Byte Frames",
+       "EEE RX LPI Transitions",
+       "EEE RX LPI Time",
+       "TX FCS Errors",
+       "TX Excess Deferral Errors",
+       "TX Carrier Errors",
+       "TX Bad Byte Count",
+       "TX Single Collisions",
+       "TX Multiple Collisions",
+       "TX Excessive Collision",
+       "TX Late Collisions",
+       "TX Unicast Byte Count",
+       "TX Broadcast Byte Count",
+       "TX Multicast Byte Count",
+       "TX Unicast Frames",
+       "TX Broadcast Frames",
+       "TX Multicast Frames",
+       "TX Pause Frames",
+       "TX 64 Byte Frames",
+       "TX 65 - 127 Byte Frames",
+       "TX 128 - 255 Byte Frames",
+       "TX 256 - 511 Bytes Frames",
+       "TX 512 - 1023 Byte Frames",
+       "TX 1024 - 1518 Byte Frames",
+       "TX Greater 1518 Byte Frames",
+       "EEE TX LPI Transitions",
+       "EEE TX LPI Time",
+};
+
+struct lan78xx_statstage {
+       u32 rx_fcs_errors;
+       u32 rx_alignment_errors;
+       u32 rx_fragment_errors;
+       u32 rx_jabber_errors;
+       u32 rx_undersize_frame_errors;
+       u32 rx_oversize_frame_errors;
+       u32 rx_dropped_frames;
+       u32 rx_unicast_byte_count;
+       u32 rx_broadcast_byte_count;
+       u32 rx_multicast_byte_count;
+       u32 rx_unicast_frames;
+       u32 rx_broadcast_frames;
+       u32 rx_multicast_frames;
+       u32 rx_pause_frames;
+       u32 rx_64_byte_frames;
+       u32 rx_65_127_byte_frames;
+       u32 rx_128_255_byte_frames;
+       u32 rx_256_511_bytes_frames;
+       u32 rx_512_1023_byte_frames;
+       u32 rx_1024_1518_byte_frames;
+       u32 rx_greater_1518_byte_frames;
+       u32 eee_rx_lpi_transitions;
+       u32 eee_rx_lpi_time;
+       u32 tx_fcs_errors;
+       u32 tx_excess_deferral_errors;
+       u32 tx_carrier_errors;
+       u32 tx_bad_byte_count;
+       u32 tx_single_collisions;
+       u32 tx_multiple_collisions;
+       u32 tx_excessive_collision;
+       u32 tx_late_collisions;
+       u32 tx_unicast_byte_count;
+       u32 tx_broadcast_byte_count;
+       u32 tx_multicast_byte_count;
+       u32 tx_unicast_frames;
+       u32 tx_broadcast_frames;
+       u32 tx_multicast_frames;
+       u32 tx_pause_frames;
+       u32 tx_64_byte_frames;
+       u32 tx_65_127_byte_frames;
+       u32 tx_128_255_byte_frames;
+       u32 tx_256_511_bytes_frames;
+       u32 tx_512_1023_byte_frames;
+       u32 tx_1024_1518_byte_frames;
+       u32 tx_greater_1518_byte_frames;
+       u32 eee_tx_lpi_transitions;
+       u32 eee_tx_lpi_time;
+};
+
+struct lan78xx_net;
+
+struct lan78xx_priv {
+       struct lan78xx_net *dev;
+       u32 rfe_ctl;
+       u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
+       u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
+       u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
+       struct mutex dataport_mutex; /* for dataport access */
+       spinlock_t rfe_ctl_lock; /* for rfe register access */
+       struct work_struct set_multicast;
+       struct work_struct set_vlan;
+       u32 wol;
+};
+
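+/* Lifecycle state of an skb queued on rxq/txq/done; stored in skb->cb
+ * through struct skb_data below.
+ */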
+enum skb_state {
+       illegal = 0,
+       tx_start,
+       tx_done,
+       rx_start,
+       rx_done,
+       rx_cleanup,
+       unlink_start
+};
+
+struct skb_data {              /* skb->cb is one of these */
+       struct urb *urb;
+       struct lan78xx_net *dev;
+       enum skb_state state;
+       size_t length;
+};
+
+struct usb_context {
+       struct usb_ctrlrequest req;
+       struct lan78xx_net *dev;
+};
+
+#define EVENT_TX_HALT                  0
+#define EVENT_RX_HALT                  1
+#define EVENT_RX_MEMORY                        2
+#define EVENT_STS_SPLIT                        3
+#define EVENT_LINK_RESET               4
+#define EVENT_RX_PAUSED                        5
+#define EVENT_DEV_WAKING               6
+#define EVENT_DEV_ASLEEP               7
+#define EVENT_DEV_OPEN                 8
+
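+/* Per-device driver state.  data[0] holds the struct lan78xx_priv
+ * pointer used by the RX filtering and VLAN code (see the casts in
+ * the functions below).
+ */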
+struct lan78xx_net {
+       struct net_device       *net;
+       struct usb_device       *udev;
+       struct usb_interface    *intf;
+       void                    *driver_priv;
+
+       int                     rx_qlen;
+       int                     tx_qlen;
+       struct sk_buff_head     rxq;
+       struct sk_buff_head     txq;
+       struct sk_buff_head     done;
+       struct sk_buff_head     rxq_pause;
+       struct sk_buff_head     txq_pend;
+
+       struct tasklet_struct   bh;
+       struct delayed_work     wq;
+
+       struct usb_host_endpoint *ep_blkin;
+       struct usb_host_endpoint *ep_blkout;
+       struct usb_host_endpoint *ep_intr;
+
+       int                     msg_enable;
+
+       struct urb              *urb_intr;
+       struct usb_anchor       deferred;
+
+       struct mutex            phy_mutex; /* for phy access */
+       unsigned                pipe_in, pipe_out, pipe_intr;
+
+       u32                     hard_mtu;       /* count any extra framing */
+       size_t                  rx_urb_size;    /* size for rx urbs */
+
+       unsigned long           flags;
+
+       wait_queue_head_t       *wait;
+       unsigned char           suspend_count;
+
+       unsigned                maxpacket;
+       struct timer_list       delay;
+
+       unsigned long           data[5];
+       struct mii_if_info      mii;
+
+       int                     link_on;
+       u8                      mdix_ctrl;
+};
+
+/* use ethtool to change the level for any given device */
+static int msg_level = -1;
+module_param(msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Override default message level");
+
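+/* Device registers are accessed through USB vendor control requests.
+ * A small kmalloc'ed bounce buffer is used because usb_control_msg()
+ * requires a DMA-able buffer rather than stack memory.
+ */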
+static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
+{
+       u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+       int ret;
+
+       BUG_ON(!dev);
+
+       if (!buf)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_READ_REGISTER,
+                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
+       if (likely(ret >= 0)) {
+               le32_to_cpus(buf);
+               *data = *buf;
+       } else {
+               netdev_warn(dev->net,
+                           "Failed to read register index 0x%08x. ret = %d",
+                           index, ret);
+       }
+
+       kfree(buf);
+
+       return ret;
+}
+
+static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
+{
+       u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
+       int ret;
+
+       BUG_ON(!dev);
+
+       if (!buf)
+               return -ENOMEM;
+
+       *buf = data;
+       cpu_to_le32s(buf);
+
+       ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_WRITE_REGISTER,
+                             USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
+       if (unlikely(ret < 0)) {
+               netdev_warn(dev->net,
+                           "Failed to write register index 0x%08x. ret = %d",
+                           index, ret);
+       }
+
+       kfree(buf);
+
+       return ret;
+}
+
+static int lan78xx_read_stats(struct lan78xx_net *dev,
+                             struct lan78xx_statstage *data)
+{
+       int ret = 0;
+       int i;
+       struct lan78xx_statstage *stats;
+       u32 *src;
+       u32 *dst;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+       BUG_ON(sizeof(struct lan78xx_statstage) != 0xBC);
+
+       stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+       if (!stats)
+               return -ENOMEM;
+
+       ret = usb_control_msg(dev->udev,
+                             usb_rcvctrlpipe(dev->udev, 0),
+                             USB_VENDOR_REQUEST_GET_STATS,
+                             USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                             0,
+                             0,
+                             (void *)stats,
+                             sizeof(*stats),
+                             USB_CTRL_SET_TIMEOUT);
+       if (likely(ret >= 0)) {
+               src = (u32 *)stats;
+               dst = (u32 *)data;
+               for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
+                       le32_to_cpus(&src[i]);
+                       dst[i] = src[i];
+               }
+       } else {
+               netdev_warn(dev->net,
+                           "Failed to read stat ret = 0x%x", ret);
+       }
+
+       kfree(stats);
+
+       return ret;
+}
+
+/* Loop until the MII access completes or times out.
+ * Called with phy_mutex held.
+ */
+static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, MII_ACC, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & MII_ACC_MII_BUSY_))
+                       return 0;
+       } while (!time_after(jiffies, start_time + HZ));
+
+       return -EIO;
+}
+
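+/* Compose an MII_ACC register value from the PHY address, register
+ * index and transfer direction.  Setting MII_ACC_MII_BUSY_ starts the
+ * transaction; completion is polled via lan78xx_phy_wait_not_busy().
+ */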
+static inline u32 mii_access(int id, int index, int read)
+{
+       u32 ret;
+
+       ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
+       ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
+       if (read)
+               ret |= MII_ACC_MII_READ_;
+       else
+               ret |= MII_ACC_MII_WRITE_;
+       ret |= MII_ACC_MII_BUSY_;
+
+       return ret;
+}
+
+static int lan78xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set the address, index & direction (read from PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       idx &= dev->mii.reg_num_mask;
+       addr = mii_access(phy_id, idx, MII_READ);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+       ret = (int)(val & 0xFFFF);
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return ret;
+}
+
+static void lan78xx_mdio_write(struct net_device *netdev, int phy_id,
+                              int idx, int regval)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       val = regval;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       /* set the address, index & direction (write to PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       idx &= dev->mii.reg_num_mask;
+       addr = mii_access(phy_id, idx, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+}
+
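+/* Indirect MMD register write through the PHY's MMD access control and
+ * data registers: select the MMD device, load the register index,
+ * switch to data (no post increment) mode, then write the value.
+ */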
+static void lan78xx_mmd_write(struct net_device *netdev, int phy_id,
+                             int mmddev, int mmdidx, int regval)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       mmddev &= 0x1F;
+
+       /* set up device address for MMD */
+       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register of MMD */
+       val = mmdidx;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register data for MMD */
+       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* write to MMD */
+       val = regval;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_mmd_read(struct net_device *netdev, int phy_id,
+                           int mmddev, int mmdidx)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       u32 val, addr;
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       mutex_lock(&dev->phy_mutex);
+
+       /* confirm MII not busy */
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set up device address for MMD */
+       ret = lan78xx_write_reg(dev, MII_DATA, mmddev);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register of MMD */
+       val = mmdidx;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* select register data for MMD */
+       val = PHY_MMD_CTRL_OP_DNI_ | mmddev;
+       ret = lan78xx_write_reg(dev, MII_DATA, val);
+
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_CTL, MII_WRITE);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* set the address, index & direction (read from PHY) */
+       phy_id &= dev->mii.phy_id_mask;
+       addr = mii_access(phy_id, PHY_MMD_REG_DATA, MII_READ);
+       ret = lan78xx_write_reg(dev, MII_ACC, addr);
+
+       ret = lan78xx_phy_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       /* read from MMD */
+       ret = lan78xx_read_reg(dev, MII_DATA, &val);
+
+       ret = (int)(val & 0xFFFF);
+
+done:
+       mutex_unlock(&dev->phy_mutex);
+       usb_autopm_put_interface(dev->intf);
+       return ret;
+}
+
+static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & E2P_CMD_EPC_BUSY_) ||
+                   (val & E2P_CMD_EPC_TIMEOUT_))
+                       break;
+               usleep_range(40, 100);
+       } while (!time_after(jiffies, start_time + HZ));
+
+       if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
+               netdev_warn(dev->net, "EEPROM read operation timeout");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
+{
+       unsigned long start_time = jiffies;
+       u32 val;
+       int ret;
+
+       do {
+               ret = lan78xx_read_reg(dev, E2P_CMD, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (!(val & E2P_CMD_EPC_BUSY_))
+                       return 0;
+
+               usleep_range(40, 100);
+       } while (!time_after(jiffies, start_time + HZ));
+
+       netdev_warn(dev->net, "EEPROM is busy");
+       return -EIO;
+}
+
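+/* Read @length bytes from the configuration EEPROM, one byte per
+ * E2P_CMD read command, polling E2P_CMD_EPC_BUSY_ between accesses.
+ */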
+static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+                                  u32 length, u8 *data)
+{
+       u32 val;
+       int i, ret;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+
+       ret = lan78xx_eeprom_confirm_not_busy(dev);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < length; i++) {
+               val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
+               val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+               ret = lan78xx_write_reg(dev, E2P_CMD, val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               ret = lan78xx_wait_eeprom(dev);
+               if (ret < 0)
+                       return ret;
+
+               ret = lan78xx_read_reg(dev, E2P_DATA, &val);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               data[i] = val & 0xFF;
+               offset++;
+       }
+
+       return 0;
+}
+
+static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
+                              u32 length, u8 *data)
+{
+       u8 sig;
+       int ret;
+
+       ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
+       if ((ret == 0) && (sig == EEPROM_INDICATOR))
+               ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
+       else
+               ret = -EINVAL;
+
+       return ret;
+}
+
+static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
+                                   u32 length, u8 *data)
+{
+       u32 val;
+       int i, ret;
+
+       BUG_ON(!dev);
+       BUG_ON(!data);
+
+       ret = lan78xx_eeprom_confirm_not_busy(dev);
+       if (ret)
+               return ret;
+
+       /* Issue write/erase enable command */
+       val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
+       ret = lan78xx_write_reg(dev, E2P_CMD, val);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       ret = lan78xx_wait_eeprom(dev);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < length; i++) {
+               /* Fill data register */
+               val = data[i];
+               ret = lan78xx_write_reg(dev, E2P_DATA, val);
+               if (ret < 0)
+                       return ret;
+
+               /* Send "write" command */
+               val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
+               val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
+               ret = lan78xx_write_reg(dev, E2P_CMD, val);
+               if (ret < 0)
+                       return ret;
+
+               ret = lan78xx_wait_eeprom(dev);
+               if (ret < 0)
+                       return ret;
+
+               offset++;
+       }
+
+       return 0;
+}
+
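+/* Read @length bytes from the OTP array.  If the OTP power-down bit is
+ * set it is cleared first; each byte is then fetched by programming
+ * OTP_ADDR1/OTP_ADDR2, issuing a read command and polling OTP_STATUS.
+ */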
+static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
+                               u32 length, u8 *data)
+{
+       int i;
+       int ret;
+       u32 buf;
+       unsigned long timeout;
+
+       ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+
+       if (buf & OTP_PWR_DN_PWRDN_N_) {
+               /* clear it and wait to be cleared */
+               ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
+
+               timeout = jiffies + HZ;
+               do {
+                       usleep_range(1, 10);
+                       ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
+                       if (time_after(jiffies, timeout)) {
+                               netdev_warn(dev->net,
+                                           "timeout on OTP_PWR_DN");
+                               return -EIO;
+                       }
+               } while (buf & OTP_PWR_DN_PWRDN_N_);
+       }
+
+       for (i = 0; i < length; i++) {
+               ret = lan78xx_write_reg(dev, OTP_ADDR1,
+                                       ((offset + i) >> 8) & OTP_ADDR1_15_11);
+               ret = lan78xx_write_reg(dev, OTP_ADDR2,
+                                       ((offset + i) & OTP_ADDR2_10_3));
+
+               ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
+               ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
+
+               timeout = jiffies + HZ;
+               do {
+                       udelay(1);
+                       ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
+                       if (time_after(jiffies, timeout)) {
+                               netdev_warn(dev->net,
+                                           "timeout on OTP_STATUS");
+                               return -EIO;
+                       }
+               } while (buf & OTP_STATUS_BUSY_);
+
+               ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
+
+               data[i] = (u8)(buf & 0xFF);
+       }
+
+       return 0;
+}
+
+static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
+                           u32 length, u8 *data)
+{
+       u8 sig;
+       int ret;
+
+       ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
+
+       if (ret == 0) {
+               if (sig == OTP_INDICATOR_2)
+                       offset += 0x100;
+               else if (sig != OTP_INDICATOR_1)
+                       return -EINVAL;
+
+               ret = lan78xx_read_raw_otp(dev, offset, length, data);
+       }
+
+       return ret;
+}
+
+static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
+{
+       int i, ret;
+
+       for (i = 0; i < 100; i++) {
+               u32 dp_sel;
+
+               ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+               if (unlikely(ret < 0))
+                       return -EIO;
+
+               if (dp_sel & DP_SEL_DPRDY_)
+                       return 0;
+
+               usleep_range(40, 100);
+       }
+
+       netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
+
+       return -EIO;
+}
+
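+/* Write a block of words into internal device RAM (used for the VLAN
+ * and multicast hash tables) through the indirect dataport window:
+ * select the RAM with DP_SEL, then issue an address/data/write-command
+ * sequence per word, waiting for DP_SEL_DPRDY_ in between.
+ */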
+static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
+                                 u32 addr, u32 length, u32 *buf)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 dp_sel;
+       int i, ret;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return 0;
+
+       mutex_lock(&pdata->dataport_mutex);
+
+       ret = lan78xx_dataport_wait_not_busy(dev);
+       if (ret < 0)
+               goto done;
+
+       ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
+
+       dp_sel &= ~DP_SEL_RSEL_MASK_;
+       dp_sel |= ram_select;
+       ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
+
+       for (i = 0; i < length; i++) {
+               ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
+
+               ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
+
+               ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
+
+               ret = lan78xx_dataport_wait_not_busy(dev);
+               if (ret < 0)
+                       goto done;
+       }
+
+done:
+       mutex_unlock(&pdata->dataport_mutex);
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
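+/* Pack a MAC address into the shadow perfect-filter entry @index
+ * (MAF_HI/MAF_LO format).  Entry 0 is reserved for the device's own
+ * address; the table is flushed to hardware by the deferred multicast
+ * write.
+ */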
+static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
+                                   int index, u8 addr[ETH_ALEN])
+{
+       u32     temp;
+
+       if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
+               temp = addr[3];
+               temp = addr[2] | (temp << 8);
+               temp = addr[1] | (temp << 8);
+               temp = addr[0] | (temp << 8);
+               pdata->pfilter_table[index][1] = temp;
+               temp = addr[5];
+               temp = addr[4] | (temp << 8);
+               temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
+               pdata->pfilter_table[index][0] = temp;
+       }
+}
+
+/* returns hash bit number for given MAC address */
+static inline u32 lan78xx_hash(char addr[ETH_ALEN])
+{
+       return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
+}
+
+static void lan78xx_deferred_multicast_write(struct work_struct *param)
+{
+       struct lan78xx_priv *pdata =
+                       container_of(param, struct lan78xx_priv, set_multicast);
+       struct lan78xx_net *dev = pdata->dev;
+       int i;
+       int ret;
+
+       netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
+                 pdata->rfe_ctl);
+
+       lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
+                              DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
+
+       for (i = 1; i < NUM_OF_MAF; i++) {
+               ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
+               ret = lan78xx_write_reg(dev, MAF_LO(i),
+                                       pdata->pfilter_table[i][1]);
+               ret = lan78xx_write_reg(dev, MAF_HI(i),
+                                       pdata->pfilter_table[i][0]);
+       }
+
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+}
+
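+/* Update the RX filters.  This can be called in atomic context, so
+ * only the shadow filter state is updated here under rfe_ctl_lock;
+ * the actual (sleeping) USB register writes are deferred to the
+ * set_multicast work item.
+ */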
+static void lan78xx_set_multicast(struct net_device *netdev)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+       pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
+                           RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
+
+       for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
+               pdata->mchash_table[i] = 0;
+       /* pfilter_table[0] has own HW address */
+       for (i = 1; i < NUM_OF_MAF; i++) {
+               pdata->pfilter_table[i][0] = 0;
+               pdata->pfilter_table[i][1] = 0;
+       }
+
+       pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
+
+       if (dev->net->flags & IFF_PROMISC) {
+               netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
+               pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
+       } else {
+               if (dev->net->flags & IFF_ALLMULTI) {
+                       netif_dbg(dev, drv, dev->net,
+                                 "receive all multicast enabled");
+                       pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
+               }
+       }
+
+       if (netdev_mc_count(dev->net)) {
+               struct netdev_hw_addr *ha;
+               int i;
+
+               netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
+
+               pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
+
+               i = 1;
+               netdev_for_each_mc_addr(ha, netdev) {
+                       /* set first 32 into Perfect Filter */
+                       if (i < 33) {
+                               lan78xx_set_addr_filter(pdata, i, ha->addr);
+                       } else {
+                               u32 bitnum = lan78xx_hash(ha->addr);
+
+                               pdata->mchash_table[bitnum / 32] |=
+                                                       (1 << (bitnum % 32));
+                               pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
+                       }
+                       i++;
+               }
+       }
+
+       spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_multicast);
+}
+
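+/* Resolve pause settings from the local and remote advertisements,
+ * program the FIFO flow-control thresholds (FCT_FLOW) according to
+ * the USB speed, then enable TX/RX pause in the FLOW register.
+ */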
+static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
+                                     u16 lcladv, u16 rmtadv)
+{
+       u32 flow = 0, fct_flow = 0;
+       int ret;
+
+       u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+       if (cap & FLOW_CTRL_TX)
+               flow = (FLOW_CR_TX_FCEN_ | 0xFFFF);
+
+       if (cap & FLOW_CTRL_RX)
+               flow |= FLOW_CR_RX_FCEN_;
+
+       if (dev->udev->speed == USB_SPEED_SUPER)
+               fct_flow = 0x817;
+       else if (dev->udev->speed == USB_SPEED_HIGH)
+               fct_flow = 0x211;
+
+       netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
+                 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+                 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+
+       ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
+
+       /* threshold value should be set before enabling flow */
+       ret = lan78xx_write_reg(dev, FLOW, flow);
+
+       return 0;
+}
+
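+/* Handle a PHY link change (deferred via EVENT_LINK_RESET): ack the
+ * interrupt at PHY and MAC level, reset the MAC on link-down, and on
+ * link-up re-tune USB U1/U2 power states and flow control for the
+ * negotiated link.
+ */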
+static int lan78xx_link_reset(struct lan78xx_net *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+       u16 ladv, radv;
+       int ret;
+       u32 buf;
+
+       /* clear PHY interrupt status */
+       /* VTSE PHY */
+       ret = lan78xx_mdio_read(dev->net, mii->phy_id, PHY_VTSE_INT_STS);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       /* clear LAN78xx interrupt status */
+       ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
+       if (unlikely(ret < 0))
+               return -EIO;
+
+       if (!mii_link_ok(mii) && dev->link_on) {
+               dev->link_on = false;
+               netif_carrier_off(dev->net);
+
+               /* reset MAC */
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               if (unlikely(ret < 0))
+                       return -EIO;
+               buf |= MAC_CR_RST_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+               if (unlikely(ret < 0))
+                       return -EIO;
+       } else if (mii_link_ok(mii) && !dev->link_on) {
+               dev->link_on = true;
+
+               mii_check_media(mii, 1, 1);
+               mii_ethtool_gset(&dev->mii, &ecmd);
+
+               mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+
+               if (dev->udev->speed == USB_SPEED_SUPER) {
+                       if (ethtool_cmd_speed(&ecmd) == 1000) {
+                               /* disable U2 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                               /* enable U1 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf |= USB_CFG1_DEV_U1_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                       } else {
+                               /* enable U1 & U2 */
+                               ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+                               buf |= USB_CFG1_DEV_U2_INIT_EN_;
+                               buf |= USB_CFG1_DEV_U1_INIT_EN_;
+                               ret = lan78xx_write_reg(dev, USB_CFG1, buf);
+                       }
+               }
+
+               ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
+               if (unlikely(ladv < 0))
+                       return -EIO;
+
+               radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
+               if (unlikely(radv < 0))
+                       return -EIO;
+
+               netif_dbg(dev, link, dev->net,
+                         "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
+                         ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
+
+               ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
+               netif_carrier_on(dev->net);
+       }
+
+       return ret;
+}
+
+/* Some work can't be done in tasklets, so it is deferred to keventd.
+ *
+ * NOTE: annoying asymmetry: if the work is already scheduled,
+ * schedule_delayed_work() returns false, whereas tasklet_schedule()
+ * does not.  Hope the failure is rare.
+ */
+void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
+{
+       set_bit(work, &dev->flags);
+       if (!schedule_delayed_work(&dev->wq, 0))
+               netdev_err(dev->net, "kevent %d may have been dropped\n", work);
+}
+
+static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
+{
+       u32 intdata;
+
+       if (urb->actual_length != 4) {
+               netdev_warn(dev->net,
+                           "unexpected urb length %d", urb->actual_length);
+               return;
+       }
+
+       memcpy(&intdata, urb->transfer_buffer, 4);
+       le32_to_cpus(&intdata);
+
+       if (intdata & INT_ENP_PHY_INT) {
+               netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+               lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+       } else {
+               netdev_warn(dev->net,
+                           "unexpected interrupt: 0x%08x\n", intdata);
+       }
+}
+
+static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
+{
+       return MAX_EEPROM_SIZE;
+}
+
+static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
+                                     struct ethtool_eeprom *ee, u8 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       ee->magic = LAN78XX_EEPROM_MAGIC;
+
+       return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+}
+
+static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
+                                     struct ethtool_eeprom *ee, u8 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       /* Allow entire eeprom update only */
+       if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
+           (ee->offset == 0) &&
+           (ee->len == 512) &&
+           (data[0] == EEPROM_INDICATOR))
+               return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+       else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
+                (ee->offset == 0) &&
+                (ee->len == 512) &&
+                (data[0] == OTP_INDICATOR_1))
+               return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+
+       return -EINVAL;
+}
+
+static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
+                               u8 *data)
+{
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
+}
+
+static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
+{
+       if (sset == ETH_SS_STATS)
+               return ARRAY_SIZE(lan78xx_gstrings);
+       else
+               return -EOPNOTSUPP;
+}
+
+static void lan78xx_get_stats(struct net_device *netdev,
+                             struct ethtool_stats *stats, u64 *data)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_statstage lan78xx_stat;
+       u32 *p;
+       int i;
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) {
+               p = (u32 *)&lan78xx_stat;
+               for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++)
+                       data[i] = p[i];
+       }
+
+       usb_autopm_put_interface(dev->intf);
+}
+
+static void lan78xx_get_wol(struct net_device *netdev,
+                           struct ethtool_wolinfo *wol)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       int ret;
+       u32 buf;
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+       if (usb_autopm_get_interface(dev->intf) < 0)
+               return;
+
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       if (unlikely(ret < 0)) {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       } else {
+               if (buf & USB_CFG_RMT_WKP_) {
+                       wol->supported = WAKE_ALL;
+                       wol->wolopts = pdata->wol;
+               } else {
+                       wol->supported = 0;
+                       wol->wolopts = 0;
+               }
+       }
+
+       usb_autopm_put_interface(dev->intf);
+}
+
+static int lan78xx_set_wol(struct net_device *netdev,
+                          struct ethtool_wolinfo *wol)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       pdata->wol = 0;
+       if (wol->wolopts & WAKE_UCAST)
+               pdata->wol |= WAKE_UCAST;
+       if (wol->wolopts & WAKE_MCAST)
+               pdata->wol |= WAKE_MCAST;
+       if (wol->wolopts & WAKE_BCAST)
+               pdata->wol |= WAKE_BCAST;
+       if (wol->wolopts & WAKE_MAGIC)
+               pdata->wol |= WAKE_MAGIC;
+       if (wol->wolopts & WAKE_PHY)
+               pdata->wol |= WAKE_PHY;
+       if (wol->wolopts & WAKE_ARP)
+               pdata->wol |= WAKE_ARP;
+
+       device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+       u32 buf;
+       u32 adv, lpadv;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+       if (buf & MAC_CR_EEE_EN_) {
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT);
+               adv = mmd_eee_adv_to_ethtool_adv_t(buf);
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+               edata->eee_enabled = true;
+               edata->supported = true;
+               edata->eee_active = !!(adv & lpadv);
+               edata->advertised = adv;
+               edata->lp_advertised = lpadv;
+               edata->tx_lpi_enabled = true;
+               /* EEE_TX_LPI_REQ_DLY and tx_lpi_timer are in microseconds */
+               ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
+               edata->tx_lpi_timer = buf;
+       } else {
+               buf = lan78xx_mmd_read(dev->net, dev->mii.phy_id,
+                                      PHY_MMD_DEV_7, PHY_EEE_LP_ADVERTISEMENT);
+               lpadv = mmd_eee_adv_to_ethtool_adv_t(buf);
+
+               edata->eee_enabled = false;
+               edata->eee_active = false;
+               edata->supported = false;
+               edata->advertised = 0;
+               edata->lp_advertised = lpadv;
+               edata->tx_lpi_enabled = false;
+               edata->tx_lpi_timer = 0;
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+       u32 buf;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       if (edata->eee_enabled) {
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               buf |= MAC_CR_EEE_EN_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+               buf = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+               lan78xx_mmd_write(dev->net, dev->mii.phy_id,
+                                 PHY_MMD_DEV_7, PHY_EEE_ADVERTISEMENT, buf);
+       } else {
+               ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+               buf &= ~MAC_CR_EEE_EN_;
+               ret = lan78xx_write_reg(dev, MAC_CR, buf);
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static u32 lan78xx_get_link(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       return mii_link_ok(&dev->mii);
+}
+
+int lan78xx_nway_reset(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       return mii_nway_restart(&dev->mii);
+}
+
+static void lan78xx_get_drvinfo(struct net_device *net,
+                               struct ethtool_drvinfo *info)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+       strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+       usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static u32 lan78xx_get_msglevel(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       return dev->msg_enable;
+}
+
+static void lan78xx_set_msglevel(struct net_device *net, u32 level)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       dev->msg_enable = level;
+}
+
+static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       struct mii_if_info *mii = &dev->mii;
+       int ret;
+       int buf;
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       ret = mii_ethtool_gset(&dev->mii, cmd);
+
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+       buf = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+
+       buf &= PHY_EXT_MODE_CTRL_MDIX_MASK_;
+       if (buf == PHY_EXT_MODE_CTRL_AUTO_MDIX_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+       } else if (buf == PHY_EXT_MODE_CTRL_MDI_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
+       } else if (buf == PHY_EXT_MODE_CTRL_MDI_X_) {
+               cmd->eth_tp_mdix = ETH_TP_MDI_X;
+               cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       struct mii_if_info *mii = &dev->mii;
+       int ret = 0;
+       int temp;
+
+       if ((!dev->mii.mdio_read) || (!dev->mii.mdio_write))
+               return -EOPNOTSUPP;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               return ret;
+
+       if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
+               if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_MDI_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_X) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_MDI_X_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               } else if (cmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) {
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_1);
+                       temp = mii->mdio_read(mii->dev, mii->phy_id,
+                                                       PHY_EXT_MODE_CTRL);
+                       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_MODE_CTRL,
+                                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+                       mii->mdio_write(mii->dev, mii->phy_id,
+                                       PHY_EXT_GPIO_PAGE,
+                                       PHY_EXT_GPIO_PAGE_SPACE_0);
+               }
+       }
+
+       /* change speed & duplex */
+       ret = mii_ethtool_sset(&dev->mii, cmd);
+
+       if (!cmd->autoneg) {
+               /* force link down */
+               temp = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR);
+               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR,
+                               temp | BMCR_LOOPBACK);
+               mdelay(1);
+               mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, temp);
+       }
+
+       usb_autopm_put_interface(dev->intf);
+
+       return ret;
+}
+
+static const struct ethtool_ops lan78xx_ethtool_ops = {
+       .get_link       = lan78xx_get_link,
+       .nway_reset     = lan78xx_nway_reset,
+       .get_drvinfo    = lan78xx_get_drvinfo,
+       .get_msglevel   = lan78xx_get_msglevel,
+       .set_msglevel   = lan78xx_set_msglevel,
+       .get_settings   = lan78xx_get_settings,
+       .set_settings   = lan78xx_set_settings,
+       .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
+       .get_eeprom     = lan78xx_ethtool_get_eeprom,
+       .set_eeprom     = lan78xx_ethtool_set_eeprom,
+       .get_ethtool_stats = lan78xx_get_stats,
+       .get_sset_count = lan78xx_get_sset_count,
+       .get_strings    = lan78xx_get_strings,
+       .get_wol        = lan78xx_get_wol,
+       .set_wol        = lan78xx_set_wol,
+       .get_eee        = lan78xx_get_eee,
+       .set_eee        = lan78xx_set_eee,
+};
+
+static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+
+       if (!netif_running(netdev))
+               return -EINVAL;
+
+       return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
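+/* Determine the MAC address: use the one already programmed in
+ * RX_ADDRL/RX_ADDRH if valid, otherwise try EEPROM, then OTP, and
+ * finally fall back to a random address.
+ */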
+static void lan78xx_init_mac_address(struct lan78xx_net *dev)
+{
+       u32 addr_lo, addr_hi;
+       int ret;
+       u8 addr[6];
+
+       ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
+       ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
+
+       addr[0] = addr_lo & 0xFF;
+       addr[1] = (addr_lo >> 8) & 0xFF;
+       addr[2] = (addr_lo >> 16) & 0xFF;
+       addr[3] = (addr_lo >> 24) & 0xFF;
+       addr[4] = addr_hi & 0xFF;
+       addr[5] = (addr_hi >> 8) & 0xFF;
+
+       if (!is_valid_ether_addr(addr)) {
+               /* reading mac address from EEPROM or OTP */
+               if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+                                        addr) == 0) ||
+                   (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
+                                     addr) == 0)) {
+                       if (is_valid_ether_addr(addr)) {
+                               /* eeprom values are valid so use them */
+                               netif_dbg(dev, ifup, dev->net,
+                                         "MAC address read from EEPROM");
+                       } else {
+                               /* generate random MAC */
+                               random_ether_addr(addr);
+                               netif_dbg(dev, ifup, dev->net,
+                                         "MAC address set to random addr");
+                       }
+
+                       addr_lo = addr[0] | (addr[1] << 8) |
+                                 (addr[2] << 16) | (addr[3] << 24);
+                       addr_hi = addr[4] | (addr[5] << 8);
+
+                       ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+                       ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+               } else {
+                       /* generate random MAC */
+                       random_ether_addr(addr);
+                       netif_dbg(dev, ifup, dev->net,
+                                 "MAC address set to random addr");
+               }
+       }
+
+       ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+       ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
+       ether_addr_copy(dev->net->dev_addr, addr);
+}
+
+static void lan78xx_mii_init(struct lan78xx_net *dev)
+{
+       /* Initialize MII structure */
+       dev->mii.dev = dev->net;
+       dev->mii.mdio_read = lan78xx_mdio_read;
+       dev->mii.mdio_write = lan78xx_mdio_write;
+       dev->mii.phy_id_mask = 0x1f;
+       dev->mii.reg_num_mask = 0x1f;
+       dev->mii.phy_id = INTERNAL_PHY_ID;
+       dev->mii.supports_gmii = true;
+}
+
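+/* Basic PHY setup: advertise all speeds plus pause, enable auto-MDIX
+ * through the extended page registers, drop 1000HALF (not supported
+ * by the MAC) and unmask the link-change interrupt.
+ */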
+static int lan78xx_phy_init(struct lan78xx_net *dev)
+{
+       int temp;
+       struct mii_if_info *mii = &dev->mii;
+
+       if ((!mii->mdio_write) || (!mii->mdio_read))
+               return -EOPNOTSUPP;
+
+       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE);
+       temp |= ADVERTISE_ALL;
+       mii->mdio_write(mii->dev, mii->phy_id, MII_ADVERTISE,
+                       temp | ADVERTISE_CSMA |
+                       ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+
+       /* set to AUTOMDIX */
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_1);
+       temp = mii->mdio_read(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL);
+       temp &= ~PHY_EXT_MODE_CTRL_MDIX_MASK_;
+       mii->mdio_write(mii->dev, mii->phy_id, PHY_EXT_MODE_CTRL,
+                       temp | PHY_EXT_MODE_CTRL_AUTO_MDIX_);
+       mii->mdio_write(mii->dev, mii->phy_id,
+                       PHY_EXT_GPIO_PAGE, PHY_EXT_GPIO_PAGE_SPACE_0);
+       dev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+       /* MAC doesn't support 1000HD */
+       temp = mii->mdio_read(mii->dev, mii->phy_id, MII_CTRL1000);
+       mii->mdio_write(mii->dev, mii->phy_id, MII_CTRL1000,
+                       temp & ~ADVERTISE_1000HALF);
+
+       /* clear interrupt */
+       mii->mdio_read(mii->dev, mii->phy_id, PHY_VTSE_INT_STS);
+       mii->mdio_write(mii->dev, mii->phy_id, PHY_VTSE_INT_MASK,
+                       PHY_VTSE_INT_MASK_MDINTPIN_EN_ |
+                       PHY_VTSE_INT_MASK_LINK_CHANGE_);
+
+       netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
+
+       return 0;
+}
+
+static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
+{
+       int ret = 0;
+       u32 buf;
+       bool rxenabled;
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+
+       rxenabled = ((buf & MAC_RX_RXEN_) != 0);
+
+       if (rxenabled) {
+               buf &= ~MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+       }
+
+       /* add 4 to size for FCS */
+       buf &= ~MAC_RX_MAX_SIZE_MASK_;
+       buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
+
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       if (rxenabled) {
+               buf |= MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+       }
+
+       return 0;
+}
+
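+/* Unlink every URB still pending on @q; returns the number of URBs
+ * successfully unlinked.  A reference is held across usb_unlink_urb()
+ * because it races with the completion handler.
+ */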
+static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
+{
+       struct sk_buff *skb;
+       unsigned long flags;
+       int count = 0;
+
+       spin_lock_irqsave(&q->lock, flags);
+       while (!skb_queue_empty(q)) {
+               struct skb_data *entry;
+               struct urb *urb;
+               int ret;
+
+               skb_queue_walk(q, skb) {
+                       entry = (struct skb_data *)skb->cb;
+                       if (entry->state != unlink_start)
+                               goto found;
+               }
+               break;
+found:
+               entry->state = unlink_start;
+               urb = entry->urb;
+
+               /* Take a reference on the URB so it cannot be freed
+                * while usb_unlink_urb() runs; usb_unlink_urb() always
+                * races with the .complete handler (including defer_bh),
+                * which could otherwise trigger a use-after-free.
+                */
+               usb_get_urb(urb);
+               spin_unlock_irqrestore(&q->lock, flags);
+               /* during some PM-driven resume scenarios,
+                * these (async) unlinks complete immediately
+                */
+               ret = usb_unlink_urb(urb);
+               if (ret != -EINPROGRESS && ret != 0)
+                       netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
+               else
+                       count++;
+               usb_put_urb(urb);
+               spin_lock_irqsave(&q->lock, flags);
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+       return count;
+}
+
+static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       int ll_mtu = new_mtu + netdev->hard_header_len;
+       int old_hard_mtu = dev->hard_mtu;
+       int old_rx_urb_size = dev->rx_urb_size;
+       int ret;
+
+       if (new_mtu > MAX_SINGLE_PACKET_SIZE)
+               return -EINVAL;
+
+       if (new_mtu <= 0)
+               return -EINVAL;
+       /* no second zero-length packet read wanted after mtu-sized packets */
+       if ((ll_mtu % dev->maxpacket) == 0)
+               return -EDOM;
+
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+
+       netdev->mtu = new_mtu;
+
+       dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+       if (dev->rx_urb_size == old_hard_mtu) {
+               dev->rx_urb_size = dev->hard_mtu;
+               if (dev->rx_urb_size > old_rx_urb_size) {
+                       if (netif_running(dev->net)) {
+                               unlink_urbs(dev, &dev->rxq);
+                               tasklet_schedule(&dev->bh);
+                       }
+               }
+       }
+
+       return 0;
+}
+
+int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct sockaddr *addr = p;
+       u32 addr_lo, addr_hi;
+       int ret;
+
+       if (netif_running(netdev))
+               return -EBUSY;
+
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+
+       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
+       addr_lo = netdev->dev_addr[0] |
+                 netdev->dev_addr[1] << 8 |
+                 netdev->dev_addr[2] << 16 |
+                 netdev->dev_addr[3] << 24;
+       addr_hi = netdev->dev_addr[4] |
+                 netdev->dev_addr[5] << 8;
+
+       ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
+       ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+
+       return 0;
+}
+
+/* Enable or disable Rx checksum offload engine */
+static int lan78xx_set_features(struct net_device *netdev,
+                               netdev_features_t features)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+       if (features & NETIF_F_RXCSUM) {
+               pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
+               pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
+       } else {
+               pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
+               pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
+       }
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
+
+       spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+       return 0;
+}
+
+static void lan78xx_deferred_vlan_write(struct work_struct *param)
+{
+       struct lan78xx_priv *pdata =
+                       container_of(param, struct lan78xx_priv, set_vlan);
+       struct lan78xx_net *dev = pdata->dev;
+
+       lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
+                              DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
+}
+
+static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
+                                  __be16 proto, u16 vid)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u16 vid_bit_index;
+       u16 vid_dword_index;
+
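+       /* the VLAN table is a 4096-bit bitmap stored as 128 32-bit words:
+        * bits 5-11 of the vid select the word, bits 0-4 the bit within it
+        */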
+       vid_dword_index = (vid >> 5) & 0x7F;
+       vid_bit_index = vid & 0x1F;
+
+       pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_vlan);
+
+       return 0;
+}
+
+static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
+                                   __be16 proto, u16 vid)
+{
+       struct lan78xx_net *dev = netdev_priv(netdev);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u16 vid_bit_index;
+       u16 vid_dword_index;
+
+       vid_dword_index = (vid >> 5) & 0x7F;
+       vid_bit_index = vid & 0x1F;
+
+       pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
+
+       /* defer register writes to a sleepable context */
+       schedule_work(&pdata->set_vlan);
+
+       return 0;
+}
+
+static void lan78xx_init_ltm(struct lan78xx_net *dev)
+{
+       int ret;
+       u32 buf;
+       u32 regs[6] = { 0 };
+
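+       /* LTM (Latency Tolerance Messaging) registers default to zero
+        * unless a 24-byte configuration block is provided in EEPROM or OTP
+        */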
+       ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+       if (buf & USB_CFG1_LTM_ENABLE_) {
+               u8 temp[2];
+               /* Get values from EEPROM first */
+               if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
+                       if (temp[0] == 24) {
+                               ret = lan78xx_read_raw_eeprom(dev,
+                                                             temp[1] * 2,
+                                                             24,
+                                                             (u8 *)regs);
+                               if (ret < 0)
+                                       return;
+                       }
+               } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
+                       if (temp[0] == 24) {
+                               ret = lan78xx_read_raw_otp(dev,
+                                                          temp[1] * 2,
+                                                          24,
+                                                          (u8 *)regs);
+                               if (ret < 0)
+                                       return;
+                       }
+               }
+       }
+
+       lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
+       lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
+       lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
+       lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
+       lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
+       lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
+}
+
+static int lan78xx_reset(struct lan78xx_net *dev)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 buf;
+       int ret = 0;
+       unsigned long timeout;
+
+       ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+       buf |= HW_CFG_LRST_;
+       ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+       timeout = jiffies + HZ;
+       do {
+               mdelay(1);
+               ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(dev->net,
+                                   "timeout on completion of LiteReset");
+                       return -EIO;
+               }
+       } while (buf & HW_CFG_LRST_);
+
+       lan78xx_init_mac_address(dev);
+
+       /* Respond to the IN token with a NAK */
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       buf |= USB_CFG_BIR_;
+       ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+       /* Init LTM */
+       lan78xx_init_ltm(dev);
+
+       dev->net->hard_header_len += TX_OVERHEAD;
+       dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
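+       /* scale the bulk-in burst size and the rx/tx queue depths to the
+        * negotiated USB speed; BURST_CAP is programmed in units of the
+        * per-speed USB packet size
+        */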
+       if (dev->udev->speed == USB_SPEED_SUPER) {
+               buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = 4;
+               dev->tx_qlen = 4;
+       } else if (dev->udev->speed == USB_SPEED_HIGH) {
+               buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
+               dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
+       } else {
+               buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
+               dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
+               dev->rx_qlen = 4;
+               dev->tx_qlen = 4;
+       }
+
+       ret = lan78xx_write_reg(dev, BURST_CAP, buf);
+       ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
+
+       ret = lan78xx_read_reg(dev, HW_CFG, &buf);
+       buf |= HW_CFG_MEF_;
+       ret = lan78xx_write_reg(dev, HW_CFG, buf);
+
+       ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
+       buf |= USB_CFG_BCE_;
+       ret = lan78xx_write_reg(dev, USB_CFG0, buf);
+
+       /* set FIFO sizes */
+       buf = (MAX_RX_FIFO_SIZE - 512) / 512;
+       ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
+
+       buf = (MAX_TX_FIFO_SIZE - 512) / 512;
+       ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
+
+       ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
+       ret = lan78xx_write_reg(dev, FLOW, 0);
+       ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
+
+       /* Don't need rfe_ctl_lock during initialisation */
+       ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
+       pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
+       ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+
+       /* Enable or disable checksum offload engines */
+       lan78xx_set_features(dev->net, dev->net->features);
+
+       lan78xx_set_multicast(dev->net);
+
+       /* reset PHY */
+       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+       buf |= PMT_CTL_PHY_RST_;
+       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+       timeout = jiffies + HZ;
+       do {
+               mdelay(1);
+               ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+               if (time_after(jiffies, timeout)) {
+                       netdev_warn(dev->net, "timeout waiting for PHY Reset");
+                       return -EIO;
+               }
+       } while (buf & PMT_CTL_PHY_RST_);
+
+       lan78xx_mii_init(dev);
+
+       ret = lan78xx_phy_init(dev);
+
+       ret = lan78xx_read_reg(dev, MAC_CR, &buf);
+
+       buf |= MAC_CR_GMII_EN_;
+       buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+
+       ret = lan78xx_write_reg(dev, MAC_CR, buf);
+
+       /* if EEE is enabled in the MAC, advertise EEE on the PHY as well */
+       if (buf & MAC_CR_EEE_EN_)
+               lan78xx_mmd_write(dev->net, dev->mii.phy_id, 0x07, 0x3C, 0x06);
+
+       /* enable PHY interrupts */
+       ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
+       buf |= INT_ENP_PHY_INT;
+       ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf |= MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+       ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
+       buf |= FCT_TX_CTL_EN_;
+       ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
+
+       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf |= MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
+       buf |= FCT_RX_CTL_EN_;
+       ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
+
+       if (!mii_nway_restart(&dev->mii))
+               netif_dbg(dev, link, dev->net, "autoneg initiated");
+
+       return 0;
+}
+
+static int lan78xx_open(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+       int ret;
+
+       ret = usb_autopm_get_interface(dev->intf);
+       if (ret < 0)
+               goto out;
+
+       ret = lan78xx_reset(dev);
+       if (ret < 0)
+               goto done;
+
+       /* for Link Check */
+       if (dev->urb_intr) {
+               ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
+               if (ret < 0) {
+                       netif_err(dev, ifup, dev->net,
+                                 "intr submit %d\n", ret);
+                       goto done;
+               }
+       }
+
+       set_bit(EVENT_DEV_OPEN, &dev->flags);
+
+       netif_start_queue(net);
+
+       dev->link_on = false;
+
+       lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+done:
+       usb_autopm_put_interface(dev->intf);
+
+out:
+       return ret;
+}
+
+static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
+{
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
+       DECLARE_WAITQUEUE(wait, current);
+       int temp;
+
+       /* ensure there are no more active urbs */
+       add_wait_queue(&unlink_wakeup, &wait);
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       dev->wait = &unlink_wakeup;
+       temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
+
+       /* wait for deletions to finish: keep waiting while any of the
+        * rx, tx or done queues still holds entries
+        */
+       while (!skb_queue_empty(&dev->rxq) ||
+              !skb_queue_empty(&dev->txq) ||
+              !skb_queue_empty(&dev->done)) {
+               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               netif_dbg(dev, ifdown, dev->net,
+                         "waited for %d urb completions\n", temp);
+       }
+       set_current_state(TASK_RUNNING);
+       dev->wait = NULL;
+       remove_wait_queue(&unlink_wakeup, &wait);
+}
+
+int lan78xx_stop(struct net_device *net)
+{
+       struct lan78xx_net              *dev = netdev_priv(net);
+
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
+       netif_stop_queue(net);
+
+       netif_info(dev, ifdown, dev->net,
+                  "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
+                  net->stats.rx_packets, net->stats.tx_packets,
+                  net->stats.rx_errors, net->stats.tx_errors);
+
+       lan78xx_terminate_urbs(dev);
+
+       usb_kill_urb(dev->urb_intr);
+
+       skb_queue_purge(&dev->rxq_pause);
+
+       /* deferred work (task, timer, softirq) must also stop.
+        * can't flush_scheduled_work() until we drop rtnl (later),
+        * else workers could deadlock; so make workers a NOP.
+        */
+       dev->flags = 0;
+       cancel_delayed_work_sync(&dev->wq);
+       tasklet_kill(&dev->bh);
+
+       usb_autopm_put_interface(dev->intf);
+
+       return 0;
+}
+
+static int lan78xx_linearize(struct sk_buff *skb)
+{
+       return skb_linearize(skb);
+}
+
+static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
+                                      struct sk_buff *skb, gfp_t flags)
+{
+       u32 tx_cmd_a, tx_cmd_b;
+
+       if (skb_headroom(skb) < TX_OVERHEAD) {
+               struct sk_buff *skb2;
+
+               skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
+               dev_kfree_skb_any(skb);
+               skb = skb2;
+               if (!skb)
+                       return NULL;
+       }
+
+       if (lan78xx_linearize(skb) < 0)
+               return NULL;
+
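+       /* build the two 32-bit TX command words that get pushed in front of
+        * the frame: command A holds the length plus checksum/LSO flags,
+        * command B the MSS and VLAN tag, both stored little-endian
+        */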
+       tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
+
+       tx_cmd_b = 0;
+       if (skb_is_gso(skb)) {
+               u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
+
+               tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
+
+               tx_cmd_a |= TX_CMD_A_LSO_;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               tx_cmd_a |= TX_CMD_A_IVTG_;
+               tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
+       }
+
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_cmd_b);
+       memcpy(skb->data, &tx_cmd_b, 4);
+
+       skb_push(skb, 4);
+       cpu_to_le32s(&tx_cmd_a);
+       memcpy(skb->data, &tx_cmd_a, 4);
+
+       return skb;
+}
+
+static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
+                              struct sk_buff_head *list, enum skb_state state)
+{
+       unsigned long flags;
+       enum skb_state old_state;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+
+       spin_lock_irqsave(&list->lock, flags);
+       old_state = entry->state;
+       entry->state = state;
+       BUG_ON(!list->prev);
+       BUG_ON(!list->next);
+       BUG_ON(!skb->prev || !skb->next);
+
+       __skb_unlink(skb, list);
+       spin_unlock(&list->lock);
+       spin_lock(&dev->done.lock);
+       BUG_ON(!dev->done.prev);
+       BUG_ON(!dev->done.next);
+
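+       /* move the skb to the done list; only the first entry needs to kick
+        * the bh tasklet, later ones find it already scheduled
+        */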
+       __skb_queue_tail(&dev->done, skb);
+       if (skb_queue_len(&dev->done) == 1)
+               tasklet_schedule(&dev->bh);
+       spin_unlock_irqrestore(&dev->done.lock, flags);
+
+       return old_state;
+}
+
+static void tx_complete(struct urb *urb)
+{
+       struct sk_buff *skb = (struct sk_buff *)urb->context;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+       struct lan78xx_net *dev = entry->dev;
+
+       if (urb->status == 0) {
+               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_bytes += entry->length;
+       } else {
+               dev->net->stats.tx_errors++;
+
+               switch (urb->status) {
+               case -EPIPE:
+                       lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+                       break;
+
+               /* software-driven interface shutdown */
+               case -ECONNRESET:
+               case -ESHUTDOWN:
+                       break;
+
+               case -EPROTO:
+               case -ETIME:
+               case -EILSEQ:
+                       netif_stop_queue(dev->net);
+                       break;
+               default:
+                       netif_dbg(dev, tx_err, dev->net,
+                                 "tx err %d\n", entry->urb->status);
+                       break;
+               }
+       }
+
+       usb_autopm_put_interface_async(dev->intf);
+
+       if (skb)
+               defer_bh(dev, skb, &dev->txq, tx_done);
+}
+
+static void lan78xx_queue_skb(struct sk_buff_head *list,
+                             struct sk_buff *newsk, enum skb_state state)
+{
+       struct skb_data *entry = (struct skb_data *)newsk->cb;
+
+       __skb_queue_tail(list, newsk);
+       entry->state = state;
+}
+
+netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       if (skb)
+               skb_tx_timestamp(skb);
+
+       skb = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
+       if (skb) {
+               skb_queue_tail(&dev->txq_pend, skb);
+
+               if (skb_queue_len(&dev->txq_pend) > 10)
+                       netif_stop_queue(net);
+       } else {
+               netif_dbg(dev, tx_err, dev->net,
+                         "lan78xx_tx_prep return NULL\n");
+               dev->net->stats.tx_errors++;
+               dev->net->stats.tx_dropped++;
+       }
+
+       tasklet_schedule(&dev->bh);
+
+       return NETDEV_TX_OK;
+}
+
+int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       int tmp;
+       struct usb_host_interface *alt = NULL;
+       struct usb_host_endpoint *in = NULL, *out = NULL;
+       struct usb_host_endpoint *status = NULL;
+
+       for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
+               unsigned ep;
+
+               in = NULL;
+               out = NULL;
+               status = NULL;
+               alt = intf->altsetting + tmp;
+
+               for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
+                       struct usb_host_endpoint *e;
+                       int intr = 0;
+
+                       e = alt->endpoint + ep;
+                       switch (e->desc.bmAttributes) {
+                       case USB_ENDPOINT_XFER_INT:
+                               if (!usb_endpoint_dir_in(&e->desc))
+                                       continue;
+                               intr = 1;
+                               /* FALLTHROUGH */
+                       case USB_ENDPOINT_XFER_BULK:
+                               break;
+                       default:
+                               continue;
+                       }
+                       if (usb_endpoint_dir_in(&e->desc)) {
+                               if (!intr && !in)
+                                       in = e;
+                               else if (intr && !status)
+                                       status = e;
+                       } else {
+                               if (!out)
+                                       out = e;
+                       }
+               }
+               if (in && out)
+                       break;
+       }
+       if (!alt || !in || !out)
+               return -EINVAL;
+
+       dev->pipe_in = usb_rcvbulkpipe(dev->udev,
+                                      in->desc.bEndpointAddress &
+                                      USB_ENDPOINT_NUMBER_MASK);
+       dev->pipe_out = usb_sndbulkpipe(dev->udev,
+                                       out->desc.bEndpointAddress &
+                                       USB_ENDPOINT_NUMBER_MASK);
+       dev->ep_intr = status;
+
+       return 0;
+}
+
+static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       struct lan78xx_priv *pdata = NULL;
+       int ret;
+       int i;
+
+       ret = lan78xx_get_endpoints(dev, intf);
+       if (ret < 0)
+               return ret;
+
+       dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
+
+       pdata = (struct lan78xx_priv *)(dev->data[0]);
+       if (!pdata) {
+               netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
+               return -ENOMEM;
+       }
+
+       pdata->dev = dev;
+
+       spin_lock_init(&pdata->rfe_ctl_lock);
+       mutex_init(&pdata->dataport_mutex);
+
+       INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
+
+       for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
+               pdata->vlan_table[i] = 0;
+
+       INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
+
+       dev->net->features = 0;
+
+       if (DEFAULT_TX_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_HW_CSUM;
+
+       if (DEFAULT_RX_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_RXCSUM;
+
+       if (DEFAULT_TSO_CSUM_ENABLE)
+               dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+
+       dev->net->hw_features = dev->net->features;
+
+       /* Init all registers */
+       ret = lan78xx_reset(dev);
+
+       dev->net->flags |= IFF_MULTICAST;
+
+       pdata->wol = WAKE_MAGIC;
+
+       return 0;
+}
+
+static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
+{
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+
+       if (pdata) {
+               netif_dbg(dev, ifdown, dev->net, "free pdata");
+               kfree(pdata);
+               pdata = NULL;
+               dev->data[0] = 0;
+       }
+}
+
+static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
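+       /* rx_cmd_b carries a 16-bit checksum computed by the hardware;
+        * hand it to the stack as CHECKSUM_COMPLETE unless rx checksum
+        * offload is off or the ICSM bit says to ignore the hw checksum
+        */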
+       if (!(dev->net->features & NETIF_F_RXCSUM) ||
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else {
+               skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
+               skb->ip_summed = CHECKSUM_COMPLETE;
+       }
+}
+
+void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       int             status;
+
+       if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
+               skb_queue_tail(&dev->rxq_pause, skb);
+               return;
+       }
+
+       skb->protocol = eth_type_trans(skb, dev->net);
+       dev->net->stats.rx_packets++;
+       dev->net->stats.rx_bytes += skb->len;
+
+       netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+                 skb->len + sizeof(struct ethhdr), skb->protocol);
+       memset(skb->cb, 0, sizeof(struct skb_data));
+
+       if (skb_defer_rx_timestamp(skb))
+               return;
+
+       status = netif_rx(skb);
+       if (status != NET_RX_SUCCESS)
+               netif_dbg(dev, rx_err, dev->net,
+                         "netif_rx status %d\n", status);
+}
+
+static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       if (skb->len < dev->net->hard_header_len)
+               return 0;
+
+       while (skb->len > 0) {
+               u32 rx_cmd_a, rx_cmd_b, align_count, size;
+               u16 rx_cmd_c;
+               struct sk_buff *skb2;
+               unsigned char *packet;
+
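+               /* each frame in the bulk-in buffer is preceded by a 10-byte
+                * header: rx_cmd_a and rx_cmd_b (32 bits each) plus rx_cmd_c
+                * (16 bits), followed by the packet itself and alignment
+                * padding
+                */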
+               memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
+               le32_to_cpus(&rx_cmd_a);
+               skb_pull(skb, sizeof(rx_cmd_a));
+
+               memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
+               le32_to_cpus(&rx_cmd_b);
+               skb_pull(skb, sizeof(rx_cmd_b));
+
+               memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
+               le16_to_cpus(&rx_cmd_c);
+               skb_pull(skb, sizeof(rx_cmd_c));
+
+               packet = skb->data;
+
+               /* get the packet length */
+               size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
+               align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
+
+               if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "Error rx_cmd_a=0x%08x", rx_cmd_a);
+               } else {
+                       /* last frame in this batch */
+                       if (skb->len == size) {
+                               lan78xx_rx_csum_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
+
+                               skb_trim(skb, skb->len - 4); /* remove fcs */
+                               skb->truesize = size + sizeof(struct sk_buff);
+
+                               return 1;
+                       }
+
+                       skb2 = skb_clone(skb, GFP_ATOMIC);
+                       if (unlikely(!skb2)) {
+                               netdev_warn(dev->net, "Error allocating skb");
+                               return 0;
+                       }
+
+                       skb2->len = size;
+                       skb2->data = packet;
+                       skb_set_tail_pointer(skb2, size);
+
+                       lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+
+                       skb_trim(skb2, skb2->len - 4); /* remove fcs */
+                       skb2->truesize = size + sizeof(struct sk_buff);
+
+                       lan78xx_skb_return(dev, skb2);
+               }
+
+               skb_pull(skb, size);
+
+               /* padding bytes before the next frame starts */
+               if (skb->len)
+                       skb_pull(skb, align_count);
+       }
+
+       /* skb->len is unsigned, so it cannot go negative; the loop above
+        * runs until the buffer has been fully consumed
+        */
+
+       return 1;
+}
+
+static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
+{
+       if (!lan78xx_rx(dev, skb)) {
+               dev->net->stats.rx_errors++;
+               goto done;
+       }
+
+       if (skb->len) {
+               lan78xx_skb_return(dev, skb);
+               return;
+       }
+
+       netif_dbg(dev, rx_err, dev->net, "drop\n");
+       dev->net->stats.rx_errors++;
+done:
+       skb_queue_tail(&dev->done, skb);
+}
+
+static void rx_complete(struct urb *urb);
+
+static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
+{
+       struct sk_buff *skb;
+       struct skb_data *entry;
+       unsigned long lockflags;
+       size_t size = dev->rx_urb_size;
+       int ret = 0;
+
+       skb = netdev_alloc_skb_ip_align(dev->net, size);
+       if (!skb) {
+               usb_free_urb(urb);
+               return -ENOMEM;
+       }
+
+       entry = (struct skb_data *)skb->cb;
+       entry->urb = urb;
+       entry->dev = dev;
+       entry->length = 0;
+
+       usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
+                         skb->data, size, rx_complete, skb);
+
+       spin_lock_irqsave(&dev->rxq.lock, lockflags);
+
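+       /* only submit while the device is present, running and neither
+        * halted nor asleep; otherwise bail out with -ENOLINK and free
+        * the skb and URB below
+        */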
+       if (netif_device_present(dev->net) &&
+           netif_running(dev->net) &&
+           !test_bit(EVENT_RX_HALT, &dev->flags) &&
+           !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               ret = usb_submit_urb(urb, GFP_ATOMIC);
+               switch (ret) {
+               case 0:
+                       lan78xx_queue_skb(&dev->rxq, skb, rx_start);
+                       break;
+               case -EPIPE:
+                       lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+                       break;
+               case -ENODEV:
+                       netif_dbg(dev, ifdown, dev->net, "device gone\n");
+                       netif_device_detach(dev->net);
+                       break;
+               case -EHOSTUNREACH:
+                       ret = -ENOLINK;
+                       break;
+               default:
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "rx submit, %d\n", ret);
+                       tasklet_schedule(&dev->bh);
+               }
+       } else {
+               netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
+               ret = -ENOLINK;
+       }
+       spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
+       if (ret) {
+               dev_kfree_skb_any(skb);
+               usb_free_urb(urb);
+       }
+       return ret;
+}
+
+static void rx_complete(struct urb *urb)
+{
+       struct sk_buff  *skb = (struct sk_buff *)urb->context;
+       struct skb_data *entry = (struct skb_data *)skb->cb;
+       struct lan78xx_net *dev = entry->dev;
+       int urb_status = urb->status;
+       enum skb_state state;
+
+       skb_put(skb, urb->actual_length);
+       state = rx_done;
+       entry->urb = NULL;
+
+       switch (urb_status) {
+       case 0:
+               if (skb->len < dev->net->hard_header_len) {
+                       state = rx_cleanup;
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       netif_dbg(dev, rx_err, dev->net,
+                                 "rx length %d\n", skb->len);
+               }
+               usb_mark_last_busy(dev->udev);
+               break;
+       case -EPIPE:
+               dev->net->stats.rx_errors++;
+               lan78xx_defer_kevent(dev, EVENT_RX_HALT);
+               /* FALLTHROUGH */
+       case -ECONNRESET:                               /* async unlink */
+       case -ESHUTDOWN:                                /* hardware gone */
+               netif_dbg(dev, ifdown, dev->net,
+                         "rx shutdown, code %d\n", urb_status);
+               state = rx_cleanup;
+               entry->urb = urb;
+               urb = NULL;
+               break;
+       case -EPROTO:
+       case -ETIME:
+       case -EILSEQ:
+               dev->net->stats.rx_errors++;
+               state = rx_cleanup;
+               entry->urb = urb;
+               urb = NULL;
+               break;
+
+       /* data overrun ... flush fifo? */
+       case -EOVERFLOW:
+               dev->net->stats.rx_over_errors++;
+               /* FALLTHROUGH */
+
+       default:
+               state = rx_cleanup;
+               dev->net->stats.rx_errors++;
+               netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
+               break;
+       }
+
+       state = defer_bh(dev, skb, &dev->rxq, state);
+
+       if (urb) {
+               if (netif_running(dev->net) &&
+                   !test_bit(EVENT_RX_HALT, &dev->flags) &&
+                   state != unlink_start) {
+                       rx_submit(dev, urb, GFP_ATOMIC);
+                       return;
+               }
+               usb_free_urb(urb);
+       }
+       netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
+}
+
+static void lan78xx_tx_bh(struct lan78xx_net *dev)
+{
+       int length;
+       struct urb *urb = NULL;
+       struct skb_data *entry;
+       unsigned long flags;
+       struct sk_buff_head *tqp = &dev->txq_pend;
+       struct sk_buff *skb, *skb2;
+       int ret;
+       int count, pos;
+       int skb_totallen, pkt_cnt;
+
+       skb_totallen = 0;
+       pkt_cnt = 0;
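+       /* batch as many pending frames as fit in one bulk-out transfer,
+        * each padded to a 32-bit boundary; GSO frames always travel alone
+        */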
+       for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+               if (skb_is_gso(skb)) {
+                       if (pkt_cnt) {
+                               /* handle previous packets first */
+                               break;
+                       }
+                       length = skb->len;
+                       skb2 = skb_dequeue(tqp);
+                       goto gso_skb;
+               }
+
+               if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
+                       break;
+               skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+               pkt_cnt++;
+       }
+
+       /* copy to a single skb */
+       skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+       if (!skb)
+               goto drop;
+
+       skb_put(skb, skb_totallen);
+
+       for (count = pos = 0; count < pkt_cnt; count++) {
+               skb2 = skb_dequeue(tqp);
+               BUG_ON(!skb2);
+               memcpy(skb->data + pos, skb2->data, skb2->len);
+               pos += roundup(skb2->len, sizeof(u32));
+               dev_kfree_skb(skb2);
+       }
+
+       length = skb_totallen;
+
+gso_skb:
+       urb = usb_alloc_urb(0, GFP_ATOMIC);
+       if (!urb) {
+               netif_dbg(dev, tx_err, dev->net, "no urb\n");
+               goto drop;
+       }
+
+       entry = (struct skb_data *)skb->cb;
+       entry->urb = urb;
+       entry->dev = dev;
+       entry->length = length;
+
+       spin_lock_irqsave(&dev->txq.lock, flags);
+       ret = usb_autopm_get_interface_async(dev->intf);
+       if (ret < 0) {
+               spin_unlock_irqrestore(&dev->txq.lock, flags);
+               goto drop;
+       }
+
+       usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
+                         skb->data, skb->len, tx_complete, skb);
+
+       if (length % dev->maxpacket == 0) {
+               /* send USB_ZERO_PACKET */
+               urb->transfer_flags |= URB_ZERO_PACKET;
+       }
+
+#ifdef CONFIG_PM
+       /* if this triggers, the device is still asleep */
+       if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               /* transmission will be done in resume */
+               usb_anchor_urb(urb, &dev->deferred);
+               /* no use to process more packets */
+               netif_stop_queue(dev->net);
+               usb_put_urb(urb);
+               spin_unlock_irqrestore(&dev->txq.lock, flags);
+               netdev_dbg(dev->net, "Delaying transmission for resumption\n");
+               return;
+       }
+#endif
+
+       ret = usb_submit_urb(urb, GFP_ATOMIC);
+       switch (ret) {
+       case 0:
+               dev->net->trans_start = jiffies;
+               lan78xx_queue_skb(&dev->txq, skb, tx_start);
+               if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
+                       netif_stop_queue(dev->net);
+               break;
+       case -EPIPE:
+               netif_stop_queue(dev->net);
+               lan78xx_defer_kevent(dev, EVENT_TX_HALT);
+               usb_autopm_put_interface_async(dev->intf);
+               break;
+       default:
+               usb_autopm_put_interface_async(dev->intf);
+               netif_dbg(dev, tx_err, dev->net,
+                         "tx: submit urb err %d\n", ret);
+               break;
+       }
+
+       spin_unlock_irqrestore(&dev->txq.lock, flags);
+
+       if (ret) {
+               netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
+drop:
+               dev->net->stats.tx_dropped++;
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               usb_free_urb(urb);
+       } else {
+               netif_dbg(dev, tx_queued, dev->net,
+                         "> tx, len %d, type 0x%x\n", length, skb->protocol);
+       }
+}
+
+static void lan78xx_rx_bh(struct lan78xx_net *dev)
+{
+       struct urb *urb;
+       int i;
+
+       if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
+               for (i = 0; i < 10; i++) {
+                       if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
+                               break;
+                       urb = usb_alloc_urb(0, GFP_ATOMIC);
+                       if (urb)
+                               if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
+                                       return;
+               }
+
+               if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
+                       tasklet_schedule(&dev->bh);
+       }
+       if (skb_queue_len(&dev->txq) < dev->tx_qlen)
+               netif_wake_queue(dev->net);
+}
+
+static void lan78xx_bh(unsigned long param)
+{
+       struct lan78xx_net *dev = (struct lan78xx_net *)param;
+       struct sk_buff *skb;
+       struct skb_data *entry;
+
+       BUG_ON(!dev->done.prev);
+       BUG_ON(!dev->done.next);
+
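+       /* drain the done list: completed rx buffers get processed, finished
+        * tx buffers and cleanup entries just have their URB and skb freed
+        */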
+       while ((skb = skb_dequeue(&dev->done))) {
+               entry = (struct skb_data *)(skb->cb);
+               switch (entry->state) {
+               case rx_done:
+                       entry->state = rx_cleanup;
+                       rx_process(dev, skb);
+                       continue;
+               case tx_done:
+                       usb_free_urb(entry->urb);
+                       dev_kfree_skb(skb);
+                       continue;
+               case rx_cleanup:
+                       usb_free_urb(entry->urb);
+                       dev_kfree_skb(skb);
+                       continue;
+               default:
+                       netdev_dbg(dev->net, "skb state %d\n", entry->state);
+                       return;
+               }
+               BUG_ON(!dev->done.prev);
+               BUG_ON(!dev->done.next);
+       }
+
+       if (netif_device_present(dev->net) && netif_running(dev->net)) {
+               if (!skb_queue_empty(&dev->txq_pend))
+                       lan78xx_tx_bh(dev);
+
+               if (!timer_pending(&dev->delay) &&
+                   !test_bit(EVENT_RX_HALT, &dev->flags))
+                       lan78xx_rx_bh(dev);
+       }
+}
+
+static void lan78xx_delayedwork(struct work_struct *work)
+{
+       int status;
+       struct lan78xx_net *dev;
+
+       dev = container_of(work, struct lan78xx_net, wq.work);
+
+       if (test_bit(EVENT_TX_HALT, &dev->flags)) {
+               unlink_urbs(dev, &dev->txq);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto fail_pipe;
+               status = usb_clear_halt(dev->udev, dev->pipe_out);
+               usb_autopm_put_interface(dev->intf);
+               if (status < 0 &&
+                   status != -EPIPE &&
+                   status != -ESHUTDOWN) {
+                       if (netif_msg_tx_err(dev))
+fail_pipe:
+                               netdev_err(dev->net,
+                                          "can't clear tx halt, status %d\n",
+                                          status);
+               } else {
+                       clear_bit(EVENT_TX_HALT, &dev->flags);
+                       if (status != -ESHUTDOWN)
+                               netif_wake_queue(dev->net);
+               }
+       }
+       if (test_bit(EVENT_RX_HALT, &dev->flags)) {
+               unlink_urbs(dev, &dev->rxq);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto fail_halt;
+               status = usb_clear_halt(dev->udev, dev->pipe_in);
+               usb_autopm_put_interface(dev->intf);
+               if (status < 0 &&
+                   status != -EPIPE &&
+                   status != -ESHUTDOWN) {
+                       if (netif_msg_rx_err(dev))
+fail_halt:
+                               netdev_err(dev->net,
+                                          "can't clear rx halt, status %d\n",
+                                          status);
+               } else {
+                       clear_bit(EVENT_RX_HALT, &dev->flags);
+                       tasklet_schedule(&dev->bh);
+               }
+       }
+
+       if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
+               int ret = 0;
+
+               clear_bit(EVENT_LINK_RESET, &dev->flags);
+               status = usb_autopm_get_interface(dev->intf);
+               if (status < 0)
+                       goto skip_reset;
+               ret = lan78xx_link_reset(dev);
+               if (ret < 0) {
+                       usb_autopm_put_interface(dev->intf);
+skip_reset:
+                       netdev_info(dev->net, "link reset failed (%d)\n",
+                                   ret);
+               } else {
+                       usb_autopm_put_interface(dev->intf);
+               }
+       }
+}
+
+static void intr_complete(struct urb *urb)
+{
+       struct lan78xx_net *dev = urb->context;
+       int status = urb->status;
+
+       switch (status) {
+       /* success */
+       case 0:
+               lan78xx_status(dev, urb);
+               break;
+
+       /* software-driven interface shutdown */
+       case -ENOENT:                   /* urb killed */
+       case -ESHUTDOWN:                /* hardware gone */
+               netif_dbg(dev, ifdown, dev->net,
+                         "intr shutdown, code %d\n", status);
+               return;
+
+       /* NOTE:  not throttling like RX/TX, since this endpoint
+        * already polls infrequently
+        */
+       default:
+               netdev_dbg(dev->net, "intr status %d\n", status);
+               break;
+       }
+
+       if (!netif_running(dev->net))
+               return;
+
+       memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
+       status = usb_submit_urb(urb, GFP_ATOMIC);
+       if (status != 0)
+               netif_err(dev, timer, dev->net,
+                         "intr resubmit --> %d\n", status);
+}
+
+static void lan78xx_disconnect(struct usb_interface *intf)
+{
+       struct lan78xx_net              *dev;
+       struct usb_device               *udev;
+       struct net_device               *net;
+
+       dev = usb_get_intfdata(intf);
+       usb_set_intfdata(intf, NULL);
+       if (!dev)
+               return;
+
+       udev = interface_to_usbdev(intf);
+
+       net = dev->net;
+       unregister_netdev(net);
+
+       cancel_delayed_work_sync(&dev->wq);
+
+       usb_scuttle_anchored_urbs(&dev->deferred);
+
+       lan78xx_unbind(dev, intf);
+
+       usb_kill_urb(dev->urb_intr);
+       usb_free_urb(dev->urb_intr);
+
+       free_netdev(net);
+       usb_put_dev(udev);
+}
+
+void lan78xx_tx_timeout(struct net_device *net)
+{
+       struct lan78xx_net *dev = netdev_priv(net);
+
+       unlink_urbs(dev, &dev->txq);
+       tasklet_schedule(&dev->bh);
+}
+
+static const struct net_device_ops lan78xx_netdev_ops = {
+       .ndo_open               = lan78xx_open,
+       .ndo_stop               = lan78xx_stop,
+       .ndo_start_xmit         = lan78xx_start_xmit,
+       .ndo_tx_timeout         = lan78xx_tx_timeout,
+       .ndo_change_mtu         = lan78xx_change_mtu,
+       .ndo_set_mac_address    = lan78xx_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = lan78xx_ioctl,
+       .ndo_set_rx_mode        = lan78xx_set_multicast,
+       .ndo_set_features       = lan78xx_set_features,
+       .ndo_vlan_rx_add_vid    = lan78xx_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = lan78xx_vlan_rx_kill_vid,
+};
+
+static int lan78xx_probe(struct usb_interface *intf,
+                        const struct usb_device_id *id)
+{
+       struct lan78xx_net *dev;
+       struct net_device *netdev;
+       struct usb_device *udev;
+       int ret;
+       unsigned maxp;
+       unsigned period;
+       u8 *buf = NULL;
+
+       udev = interface_to_usbdev(intf);
+       udev = usb_get_dev(udev);
+
+       ret = -ENOMEM;
+       netdev = alloc_etherdev(sizeof(struct lan78xx_net));
+       if (!netdev) {
+               dev_err(&intf->dev, "Error: OOM\n");
+               goto out1;
+       }
+
+       /* netdev_printk() needs this */
+       SET_NETDEV_DEV(netdev, &intf->dev);
+
+       dev = netdev_priv(netdev);
+       dev->udev = udev;
+       dev->intf = intf;
+       dev->net = netdev;
+       dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
+                                       | NETIF_MSG_PROBE | NETIF_MSG_LINK);
+
+       skb_queue_head_init(&dev->rxq);
+       skb_queue_head_init(&dev->txq);
+       skb_queue_head_init(&dev->done);
+       skb_queue_head_init(&dev->rxq_pause);
+       skb_queue_head_init(&dev->txq_pend);
+       mutex_init(&dev->phy_mutex);
+
+       tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
+       INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
+       init_usb_anchor(&dev->deferred);
+
+       netdev->netdev_ops = &lan78xx_netdev_ops;
+       netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
+       netdev->ethtool_ops = &lan78xx_ethtool_ops;
+
+       ret = lan78xx_bind(dev, intf);
+       if (ret < 0)
+               goto out2;
+       strcpy(netdev->name, "eth%d");
+
+       if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
+               netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
+
+       dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
+       dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
+       dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
+
+       dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+       dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+
+       dev->pipe_intr = usb_rcvintpipe(dev->udev,
+                                       dev->ep_intr->desc.bEndpointAddress &
+                                       USB_ENDPOINT_NUMBER_MASK);
+       period = dev->ep_intr->desc.bInterval;
+
+       maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
+       buf = kmalloc(maxp, GFP_KERNEL);
+       if (buf) {
+               dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
+               if (!dev->urb_intr) {
+                       kfree(buf);
+                       goto out3;
+               } else {
+                       usb_fill_int_urb(dev->urb_intr, dev->udev,
+                                        dev->pipe_intr, buf, maxp,
+                                        intr_complete, dev, period);
+               }
+       }
+
+       dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+
+       /* driver requires remote-wakeup capability during autosuspend. */
+       intf->needs_remote_wakeup = 1;
+
+       ret = register_netdev(netdev);
+       if (ret != 0) {
+               netif_err(dev, probe, netdev, "couldn't register the device\n");
+               goto out2;
+       }
+
+       usb_set_intfdata(intf, dev);
+
+       ret = device_set_wakeup_enable(&udev->dev, true);
+
+       /* the default autosuspend delay of 2 seconds causes more overhead
+        * than benefit, so use 10 seconds by default
+        */
+       pm_runtime_set_autosuspend_delay(&udev->dev,
+                                        DEFAULT_AUTOSUSPEND_DELAY);
+
+       return 0;
+
+out3:
+       lan78xx_unbind(dev, intf);
+out2:
+       free_netdev(netdev);
+out1:
+       usb_put_dev(udev);
+
+       return ret;
+}
+
+static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
+{
+       const u16 crc16poly = 0x8005;
+       int i;
+       u16 bit, crc, msb;
+       u8 data;
+
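+       /* bitwise CRC-16 with polynomial 0x8005, LSB-first data and initial
+        * value 0xFFFF, as expected by the wakeup frame filter registers
+        */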
+       crc = 0xFFFF;
+       for (i = 0; i < len; i++) {
+               data = *buf++;
+               for (bit = 0; bit < 8; bit++) {
+                       msb = crc >> 15;
+                       crc <<= 1;
+
+                       if (msb ^ (u16)(data & 1)) {
+                               crc ^= crc16poly;
+                               crc |= (u16)0x0001U;
+                       }
+                       data >>= 1;
+               }
+       }
+
+       return crc;
+}
+
+static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
+{
+       u32 buf;
+       int ret;
+       int mask_index;
+       u16 crc;
+       u32 temp_wucsr;
+       u32 temp_pmt_ctl;
+       const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
+       const u8 ipv6_multicast[2] = { 0x33, 0x33 };
+       const u8 arp_type[2] = { 0x08, 0x06 };
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf &= ~MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf &= ~MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       ret = lan78xx_write_reg(dev, WUCSR, 0);
+       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+       temp_wucsr = 0;
+
+       temp_pmt_ctl = 0;
+       ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
+       temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
+       temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
+
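+       /* clear all wakeup frame filters before programming new ones */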
+       for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
+
+       mask_index = 0;
+       if (wol & WAKE_PHY) {
+               temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_MAGIC) {
+               temp_wucsr |= WUCSR_MPEN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
+       }
+       if (wol & WAKE_BCAST) {
+               temp_wucsr |= WUCSR_BCST_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_MCAST) {
+               temp_wucsr |= WUCSR_WAKE_EN_;
+
+               /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
+               crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_MCAST_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               /* for IPv6 Multicast */
+               crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_MCAST_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_UCAST) {
+               temp_wucsr |= WUCSR_PFDA_EN_;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       if (wol & WAKE_ARP) {
+               temp_wucsr |= WUCSR_WAKE_EN_;
+
+               /* set WUF_CFG & WUF_MASK
+                * for packettype (offset 12,13) = ARP (0x0806)
+                */
+               crc = lan78xx_wakeframe_crc16(arp_type, 2);
+               ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
+                                       WUF_CFGX_EN_ |
+                                       WUF_CFGX_TYPE_ALL_ |
+                                       (0 << WUF_CFGX_OFFSET_SHIFT_) |
+                                       (crc & WUF_CFGX_CRC16_MASK_));
+
+               ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
+               ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
+               ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
+               mask_index++;
+
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+
+       ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
+
+       /* when multiple WOL bits are set */
+       if (hweight_long((unsigned long)wol) > 1) {
+               temp_pmt_ctl |= PMT_CTL_WOL_EN_;
+               temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
+               temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
+       }
+       ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
+
+       /* clear WUPS */
+       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+       buf |= PMT_CTL_WUPS_MASK_;
+       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+       buf |= MAC_RX_RXEN_;
+       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+       return 0;
+}
+
+int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+       struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
+       u32 buf;
+       int ret;
+       int event;
+
+       ret = 0;
+       event = message.event;
+
+       if (!dev->suspend_count++) {
+               spin_lock_irq(&dev->txq.lock);
+               /* don't autosuspend while transmitting */
+               if ((skb_queue_len(&dev->txq) ||
+                    skb_queue_len(&dev->txq_pend)) &&
+                       PMSG_IS_AUTO(message)) {
+                       spin_unlock_irq(&dev->txq.lock);
+                       ret = -EBUSY;
+                       goto out;
+               } else {
+                       set_bit(EVENT_DEV_ASLEEP, &dev->flags);
+                       spin_unlock_irq(&dev->txq.lock);
+               }
+
+               /* stop TX & RX */
+               ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+               buf &= ~MAC_TX_TXEN_;
+               ret = lan78xx_write_reg(dev, MAC_TX, buf);
+               ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+               buf &= ~MAC_RX_RXEN_;
+               ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+               /* empty out the rx and queues */
+               netif_device_detach(dev->net);
+               lan78xx_terminate_urbs(dev);
+               usb_kill_urb(dev->urb_intr);
+
+               /* reattach */
+               netif_device_attach(dev->net);
+       }
+
+       if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
+               if (PMSG_IS_AUTO(message)) {
+                       /* auto suspend (selective suspend) */
+                       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+                       buf &= ~MAC_TX_TXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+                       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+                       buf &= ~MAC_RX_RXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+
+                       ret = lan78xx_write_reg(dev, WUCSR, 0);
+                       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+                       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+                       /* set goodframe wakeup */
+                       ret = lan78xx_read_reg(dev, WUCSR, &buf);
+
+                       buf |= WUCSR_RFE_WAKE_EN_;
+                       buf |= WUCSR_STORE_WAKE_;
+
+                       ret = lan78xx_write_reg(dev, WUCSR, buf);
+
+                       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+                       buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
+                       buf |= PMT_CTL_RES_CLR_WKP_STS_;
+
+                       buf |= PMT_CTL_PHY_WAKE_EN_;
+                       buf |= PMT_CTL_WOL_EN_;
+                       buf &= ~PMT_CTL_SUS_MODE_MASK_;
+                       buf |= PMT_CTL_SUS_MODE_3_;
+
+                       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+                       ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
+
+                       buf |= PMT_CTL_WUPS_MASK_;
+
+                       ret = lan78xx_write_reg(dev, PMT_CTL, buf);
+
+                       ret = lan78xx_read_reg(dev, MAC_RX, &buf);
+                       buf |= MAC_RX_RXEN_;
+                       ret = lan78xx_write_reg(dev, MAC_RX, buf);
+               } else {
+                       lan78xx_set_suspend(dev, pdata->wol);
+               }
+       }
+
+out:
+       return ret;
+}
+
+int lan78xx_resume(struct usb_interface *intf)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+       struct sk_buff *skb;
+       struct urb *res;
+       int ret;
+       u32 buf;
+
+       if (!--dev->suspend_count) {
+               /* resume interrupt URBs */
+               if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
+                       usb_submit_urb(dev->urb_intr, GFP_NOIO);
+
+               spin_lock_irq(&dev->txq.lock);
+               while ((res = usb_get_from_anchor(&dev->deferred))) {
+                       skb = (struct sk_buff *)res->context;
+                       ret = usb_submit_urb(res, GFP_ATOMIC);
+                       if (ret < 0) {
+                               dev_kfree_skb_any(skb);
+                               usb_free_urb(res);
+                               usb_autopm_put_interface_async(dev->intf);
+                       } else {
+                               dev->net->trans_start = jiffies;
+                               lan78xx_queue_skb(&dev->txq, skb, tx_start);
+                       }
+               }
+
+               clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
+               spin_unlock_irq(&dev->txq.lock);
+
+               if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
+                               netif_start_queue(dev->net);
+                       tasklet_schedule(&dev->bh);
+               }
+       }
+
+       ret = lan78xx_write_reg(dev, WUCSR2, 0);
+       ret = lan78xx_write_reg(dev, WUCSR, 0);
+       ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
+
+       ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
+                                            WUCSR2_ARP_RCD_ |
+                                            WUCSR2_IPV6_TCPSYN_RCD_ |
+                                            WUCSR2_IPV4_TCPSYN_RCD_);
+
+       ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
+                                           WUCSR_EEE_RX_WAKE_ |
+                                           WUCSR_PFDA_FR_ |
+                                           WUCSR_RFE_WAKE_FR_ |
+                                           WUCSR_WUFR_ |
+                                           WUCSR_MPR_ |
+                                           WUCSR_BCST_FR_);
+
+       ret = lan78xx_read_reg(dev, MAC_TX, &buf);
+       buf |= MAC_TX_TXEN_;
+       ret = lan78xx_write_reg(dev, MAC_TX, buf);
+
+       return 0;
+}
+
+int lan78xx_reset_resume(struct usb_interface *intf)
+{
+       struct lan78xx_net *dev = usb_get_intfdata(intf);
+
+       lan78xx_reset(dev);
+       return lan78xx_resume(intf);
+}
+
+static const struct usb_device_id products[] = {
+       {
+       /* LAN7800 USB Gigabit Ethernet Device */
+       USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
+       },
+       {
+       /* LAN7850 USB Gigabit Ethernet Device */
+       USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver lan78xx_driver = {
+       .name                   = DRIVER_NAME,
+       .id_table               = products,
+       .probe                  = lan78xx_probe,
+       .disconnect             = lan78xx_disconnect,
+       .suspend                = lan78xx_suspend,
+       .resume                 = lan78xx_resume,
+       .reset_resume           = lan78xx_reset_resume,
+       .supports_autosuspend   = 1,
+       .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(lan78xx_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h
new file mode 100644 (file)
index 0000000..ae7562e
--- /dev/null
@@ -0,0 +1,1069 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef _LAN78XX_H
+#define _LAN78XX_H
+
+/* USB Vendor Requests */
+#define USB_VENDOR_REQUEST_WRITE_REGISTER      0xA0
+#define USB_VENDOR_REQUEST_READ_REGISTER       0xA1
+#define USB_VENDOR_REQUEST_GET_STATS           0xA2
+
+/* Interrupt Endpoint status word bitfields */
+#define INT_ENP_EEE_START_TX_LPI_INT           BIT(26)
+#define INT_ENP_EEE_STOP_TX_LPI_INT            BIT(25)
+#define INT_ENP_EEE_RX_LPI_INT                 BIT(24)
+#define INT_ENP_RDFO_INT                       BIT(22)
+#define INT_ENP_TXE_INT                                BIT(21)
+#define INT_ENP_TX_DIS_INT                     BIT(19)
+#define INT_ENP_RX_DIS_INT                     BIT(18)
+#define INT_ENP_PHY_INT                                BIT(17)
+#define INT_ENP_DP_INT                         BIT(16)
+#define INT_ENP_MAC_ERR_INT                    BIT(15)
+#define INT_ENP_TDFU_INT                       BIT(14)
+#define INT_ENP_TDFO_INT                       BIT(13)
+#define INT_ENP_UTX_FP_INT                     BIT(12)
+
+#define TX_PKT_ALIGNMENT                       4
+#define RX_PKT_ALIGNMENT                       4
+
+/* Tx Command A */
+#define TX_CMD_A_IGE_                  (0x20000000)
+#define TX_CMD_A_ICE_                  (0x10000000)
+#define TX_CMD_A_LSO_                  (0x08000000)
+#define TX_CMD_A_IPE_                  (0x04000000)
+#define TX_CMD_A_TPE_                  (0x02000000)
+#define TX_CMD_A_IVTG_                 (0x01000000)
+#define TX_CMD_A_RVTG_                 (0x00800000)
+#define TX_CMD_A_FCS_                  (0x00400000)
+#define TX_CMD_A_LEN_MASK_             (0x000FFFFF)
+
+/* Tx Command B */
+#define TX_CMD_B_MSS_SHIFT_            (16)
+#define TX_CMD_B_MSS_MASK_             (0x3FFF0000)
+#define TX_CMD_B_MSS_MIN_              ((unsigned short)8)
+#define TX_CMD_B_VTAG_MASK_            (0x0000FFFF)
+#define TX_CMD_B_VTAG_PRI_MASK_                (0x0000E000)
+#define TX_CMD_B_VTAG_CFI_MASK_                (0x00001000)
+#define TX_CMD_B_VTAG_VID_MASK_                (0x00000FFF)
+
+/* Rx Command A */
+#define RX_CMD_A_ICE_                  (0x80000000)
+#define RX_CMD_A_TCE_                  (0x40000000)
+#define RX_CMD_A_CSE_MASK_             (0xC0000000)
+#define RX_CMD_A_IPV_                  (0x20000000)
+#define RX_CMD_A_PID_MASK_             (0x18000000)
+#define RX_CMD_A_PID_NONE_IP_          (0x00000000)
+#define RX_CMD_A_PID_TCP_IP_           (0x08000000)
+#define RX_CMD_A_PID_UDP_IP_           (0x10000000)
+#define RX_CMD_A_PID_IP_               (0x18000000)
+#define RX_CMD_A_PFF_                  (0x04000000)
+#define RX_CMD_A_BAM_                  (0x02000000)
+#define RX_CMD_A_MAM_                  (0x01000000)
+#define RX_CMD_A_FVTG_                 (0x00800000)
+#define RX_CMD_A_RED_                  (0x00400000)
+#define RX_CMD_A_RX_ERRS_MASK_         (0xC03F0000)
+#define RX_CMD_A_RWT_                  (0x00200000)
+#define RX_CMD_A_RUNT_                 (0x00100000)
+#define RX_CMD_A_LONG_                 (0x00080000)
+#define RX_CMD_A_RXE_                  (0x00040000)
+#define RX_CMD_A_DRB_                  (0x00020000)
+#define RX_CMD_A_FCS_                  (0x00010000)
+#define RX_CMD_A_UAM_                  (0x00008000)
+#define RX_CMD_A_ICSM_                 (0x00004000)
+#define RX_CMD_A_LEN_MASK_             (0x00003FFF)
+
+/* Rx Command B */
+#define RX_CMD_B_CSUM_SHIFT_           (16)
+#define RX_CMD_B_CSUM_MASK_            (0xFFFF0000)
+#define RX_CMD_B_VTAG_MASK_            (0x0000FFFF)
+#define RX_CMD_B_VTAG_PRI_MASK_                (0x0000E000)
+#define RX_CMD_B_VTAG_CFI_MASK_                (0x00001000)
+#define RX_CMD_B_VTAG_VID_MASK_                (0x00000FFF)
+
+/* Rx Command C */
+#define RX_CMD_C_WAKE_SHIFT_           (15)
+#define RX_CMD_C_WAKE_                 (0x8000)
+#define RX_CMD_C_REF_FAIL_SHIFT_       (14)
+#define RX_CMD_C_REF_FAIL_             (0x4000)
+
+/* SCSRs */
+#define NUMBER_OF_REGS                 (193)
+
+#define ID_REV                         (0x00)
+#define ID_REV_CHIP_ID_MASK_           (0xFFFF0000)
+#define ID_REV_CHIP_REV_MASK_          (0x0000FFFF)
+#define ID_REV_CHIP_ID_7800_           (0x7800)
+
+#define FPGA_REV                       (0x04)
+#define FPGA_REV_MINOR_MASK_           (0x0000FF00)
+#define FPGA_REV_MAJOR_MASK_           (0x000000FF)
+
+#define INT_STS                                (0x0C)
+#define INT_STS_CLEAR_ALL_             (0xFFFFFFFF)
+#define INT_STS_EEE_TX_LPI_STRT_       (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_       (0x02000000)
+#define INT_STS_EEE_RX_LPI_            (0x01000000)
+#define INT_STS_RDFO_                  (0x00400000)
+#define INT_STS_TXE_                   (0x00200000)
+#define INT_STS_TX_DIS_                        (0x00080000)
+#define INT_STS_RX_DIS_                        (0x00040000)
+#define INT_STS_PHY_INT_               (0x00020000)
+#define INT_STS_DP_INT_                        (0x00010000)
+#define INT_STS_MAC_ERR_               (0x00008000)
+#define INT_STS_TDFU_                  (0x00004000)
+#define INT_STS_TDFO_                  (0x00002000)
+#define INT_STS_UFX_FP_                        (0x00001000)
+#define INT_STS_GPIO_MASK_             (0x00000FFF)
+#define INT_STS_GPIO11_                        (0x00000800)
+#define INT_STS_GPIO10_                        (0x00000400)
+#define INT_STS_GPIO9_                 (0x00000200)
+#define INT_STS_GPIO8_                 (0x00000100)
+#define INT_STS_GPIO7_                 (0x00000080)
+#define INT_STS_GPIO6_                 (0x00000040)
+#define INT_STS_GPIO5_                 (0x00000020)
+#define INT_STS_GPIO4_                 (0x00000010)
+#define INT_STS_GPIO3_                 (0x00000008)
+#define INT_STS_GPIO2_                 (0x00000004)
+#define INT_STS_GPIO1_                 (0x00000002)
+#define INT_STS_GPIO0_                 (0x00000001)
+
+#define HW_CFG                         (0x010)
+#define HW_CFG_CLK125_EN_              (0x02000000)
+#define HW_CFG_REFCLK25_EN_            (0x01000000)
+#define HW_CFG_LED3_EN_                        (0x00800000)
+#define HW_CFG_LED2_EN_                        (0x00400000)
+#define HW_CFG_LED1_EN_                        (0x00200000)
+#define HW_CFG_LED0_EN_                        (0x00100000)
+#define HW_CFG_EEE_PHY_LUSU_           (0x00020000)
+#define HW_CFG_EEE_TSU_                        (0x00010000)
+#define HW_CFG_NETDET_STS_             (0x00008000)
+#define HW_CFG_NETDET_EN_              (0x00004000)
+#define HW_CFG_EEM_                    (0x00002000)
+#define HW_CFG_RST_PROTECT_            (0x00001000)
+#define HW_CFG_CONNECT_BUF_            (0x00000400)
+#define HW_CFG_CONNECT_EN_             (0x00000200)
+#define HW_CFG_CONNECT_POL_            (0x00000100)
+#define HW_CFG_SUSPEND_N_SEL_MASK_     (0x000000C0)
+#define HW_CFG_SUSPEND_N_SEL_2         (0x00000000)
+#define HW_CFG_SUSPEND_N_SEL_12N       (0x00000040)
+#define HW_CFG_SUSPEND_N_SEL_012N      (0x00000080)
+#define HW_CFG_SUSPEND_N_SEL_0123N     (0x000000C0)
+#define HW_CFG_SUSPEND_N_POL_          (0x00000020)
+#define HW_CFG_MEF_                    (0x00000010)
+#define HW_CFG_ETC_                    (0x00000008)
+#define HW_CFG_LRST_                   (0x00000002)
+#define HW_CFG_SRST_                   (0x00000001)
+
+#define PMT_CTL                                (0x014)
+#define PMT_CTL_EEE_WAKEUP_EN_         (0x00002000)
+#define PMT_CTL_EEE_WUPS_              (0x00001000)
+#define PMT_CTL_MAC_SRST_              (0x00000800)
+#define PMT_CTL_PHY_PWRUP_             (0x00000400)
+#define PMT_CTL_RES_CLR_WKP_MASK_      (0x00000300)
+#define PMT_CTL_RES_CLR_WKP_STS_       (0x00000200)
+#define PMT_CTL_RES_CLR_WKP_EN_                (0x00000100)
+#define PMT_CTL_READY_                 (0x00000080)
+#define PMT_CTL_SUS_MODE_MASK_         (0x00000060)
+#define PMT_CTL_SUS_MODE_0_            (0x00000000)
+#define PMT_CTL_SUS_MODE_1_            (0x00000020)
+#define PMT_CTL_SUS_MODE_2_            (0x00000040)
+#define PMT_CTL_SUS_MODE_3_            (0x00000060)
+#define PMT_CTL_PHY_RST_               (0x00000010)
+#define PMT_CTL_WOL_EN_                        (0x00000008)
+#define PMT_CTL_PHY_WAKE_EN_           (0x00000004)
+#define PMT_CTL_WUPS_MASK_             (0x00000003)
+#define PMT_CTL_WUPS_MLT_              (0x00000003)
+#define PMT_CTL_WUPS_MAC_              (0x00000002)
+#define PMT_CTL_WUPS_PHY_              (0x00000001)
+
+#define GPIO_CFG0                      (0x018)
+#define GPIO_CFG0_GPIOEN_MASK_         (0x0000F000)
+#define GPIO_CFG0_GPIOEN3_             (0x00008000)
+#define GPIO_CFG0_GPIOEN2_             (0x00004000)
+#define GPIO_CFG0_GPIOEN1_             (0x00002000)
+#define GPIO_CFG0_GPIOEN0_             (0x00001000)
+#define GPIO_CFG0_GPIOBUF_MASK_                (0x00000F00)
+#define GPIO_CFG0_GPIOBUF3_            (0x00000800)
+#define GPIO_CFG0_GPIOBUF2_            (0x00000400)
+#define GPIO_CFG0_GPIOBUF1_            (0x00000200)
+#define GPIO_CFG0_GPIOBUF0_            (0x00000100)
+#define GPIO_CFG0_GPIODIR_MASK_                (0x000000F0)
+#define GPIO_CFG0_GPIODIR3_            (0x00000080)
+#define GPIO_CFG0_GPIODIR2_            (0x00000040)
+#define GPIO_CFG0_GPIODIR1_            (0x00000020)
+#define GPIO_CFG0_GPIODIR0_            (0x00000010)
+#define GPIO_CFG0_GPIOD_MASK_          (0x0000000F)
+#define GPIO_CFG0_GPIOD3_              (0x00000008)
+#define GPIO_CFG0_GPIOD2_              (0x00000004)
+#define GPIO_CFG0_GPIOD1_              (0x00000002)
+#define GPIO_CFG0_GPIOD0_              (0x00000001)
+
+#define GPIO_CFG1                      (0x01C)
+#define GPIO_CFG1_GPIOEN_MASK_         (0xFF000000)
+#define GPIO_CFG1_GPIOEN11_            (0x80000000)
+#define GPIO_CFG1_GPIOEN10_            (0x40000000)
+#define GPIO_CFG1_GPIOEN9_             (0x20000000)
+#define GPIO_CFG1_GPIOEN8_             (0x10000000)
+#define GPIO_CFG1_GPIOEN7_             (0x08000000)
+#define GPIO_CFG1_GPIOEN6_             (0x04000000)
+#define GPIO_CFG1_GPIOEN5_             (0x02000000)
+#define GPIO_CFG1_GPIOEN4_             (0x01000000)
+#define GPIO_CFG1_GPIOBUF_MASK_                (0x00FF0000)
+#define GPIO_CFG1_GPIOBUF11_           (0x00800000)
+#define GPIO_CFG1_GPIOBUF10_           (0x00400000)
+#define GPIO_CFG1_GPIOBUF9_            (0x00200000)
+#define GPIO_CFG1_GPIOBUF8_            (0x00100000)
+#define GPIO_CFG1_GPIOBUF7_            (0x00080000)
+#define GPIO_CFG1_GPIOBUF6_            (0x00040000)
+#define GPIO_CFG1_GPIOBUF5_            (0x00020000)
+#define GPIO_CFG1_GPIOBUF4_            (0x00010000)
+#define GPIO_CFG1_GPIODIR_MASK_                (0x0000FF00)
+#define GPIO_CFG1_GPIODIR11_           (0x00008000)
+#define GPIO_CFG1_GPIODIR10_           (0x00004000)
+#define GPIO_CFG1_GPIODIR9_            (0x00002000)
+#define GPIO_CFG1_GPIODIR8_            (0x00001000)
+#define GPIO_CFG1_GPIODIR7_            (0x00000800)
+#define GPIO_CFG1_GPIODIR6_            (0x00000400)
+#define GPIO_CFG1_GPIODIR5_            (0x00000200)
+#define GPIO_CFG1_GPIODIR4_            (0x00000100)
+#define GPIO_CFG1_GPIOD_MASK_          (0x000000FF)
+#define GPIO_CFG1_GPIOD11_             (0x00000080)
+#define GPIO_CFG1_GPIOD10_             (0x00000040)
+#define GPIO_CFG1_GPIOD9_              (0x00000020)
+#define GPIO_CFG1_GPIOD8_              (0x00000010)
+#define GPIO_CFG1_GPIOD7_              (0x00000008)
+#define GPIO_CFG1_GPIOD6_              (0x00000004)
+#define GPIO_CFG1_GPIOD5_              (0x00000002)
+#define GPIO_CFG1_GPIOD4_              (0x00000001)
+
+#define GPIO_WAKE                      (0x020)
+#define GPIO_WAKE_GPIOPOL_MASK_                (0x0FFF0000)
+#define GPIO_WAKE_GPIOPOL11_           (0x08000000)
+#define GPIO_WAKE_GPIOPOL10_           (0x04000000)
+#define GPIO_WAKE_GPIOPOL9_            (0x02000000)
+#define GPIO_WAKE_GPIOPOL8_            (0x01000000)
+#define GPIO_WAKE_GPIOPOL7_            (0x00800000)
+#define GPIO_WAKE_GPIOPOL6_            (0x00400000)
+#define GPIO_WAKE_GPIOPOL5_            (0x00200000)
+#define GPIO_WAKE_GPIOPOL4_            (0x00100000)
+#define GPIO_WAKE_GPIOPOL3_            (0x00080000)
+#define GPIO_WAKE_GPIOPOL2_            (0x00040000)
+#define GPIO_WAKE_GPIOPOL1_            (0x00020000)
+#define GPIO_WAKE_GPIOPOL0_            (0x00010000)
+#define GPIO_WAKE_GPIOWK_MASK_         (0x00000FFF)
+#define GPIO_WAKE_GPIOWK11_            (0x00000800)
+#define GPIO_WAKE_GPIOWK10_            (0x00000400)
+#define GPIO_WAKE_GPIOWK9_             (0x00000200)
+#define GPIO_WAKE_GPIOWK8_             (0x00000100)
+#define GPIO_WAKE_GPIOWK7_             (0x00000080)
+#define GPIO_WAKE_GPIOWK6_             (0x00000040)
+#define GPIO_WAKE_GPIOWK5_             (0x00000020)
+#define GPIO_WAKE_GPIOWK4_             (0x00000010)
+#define GPIO_WAKE_GPIOWK3_             (0x00000008)
+#define GPIO_WAKE_GPIOWK2_             (0x00000004)
+#define GPIO_WAKE_GPIOWK1_             (0x00000002)
+#define GPIO_WAKE_GPIOWK0_             (0x00000001)
+
+#define DP_SEL                         (0x024)
+#define DP_SEL_DPRDY_                  (0x80000000)
+#define DP_SEL_RSEL_MASK_              (0x0000000F)
+#define DP_SEL_RSEL_USB_PHY_CSRS_      (0x0000000F)
+#define DP_SEL_RSEL_OTP_64BIT_         (0x00000009)
+#define DP_SEL_RSEL_OTP_8BIT_          (0x00000008)
+#define DP_SEL_RSEL_UTX_BUF_RAM_       (0x00000007)
+#define DP_SEL_RSEL_DESC_RAM_          (0x00000005)
+#define DP_SEL_RSEL_TXFIFO_            (0x00000004)
+#define DP_SEL_RSEL_RXFIFO_            (0x00000003)
+#define DP_SEL_RSEL_LSO_               (0x00000002)
+#define DP_SEL_RSEL_VLAN_DA_           (0x00000001)
+#define DP_SEL_RSEL_URXBUF_            (0x00000000)
+#define DP_SEL_VHF_HASH_LEN            (16)
+#define DP_SEL_VHF_VLAN_LEN            (128)
+
+#define DP_CMD                         (0x028)
+#define DP_CMD_WRITE_                  (0x00000001)
+#define DP_CMD_READ_                   (0x00000000)
+
+#define DP_ADDR                                (0x02C)
+#define DP_ADDR_MASK_                  (0x00003FFF)
+
+#define DP_DATA                                (0x030)
+
+#define E2P_CMD                                (0x040)
+#define E2P_CMD_EPC_BUSY_              (0x80000000)
+#define E2P_CMD_EPC_CMD_MASK_          (0x70000000)
+#define E2P_CMD_EPC_CMD_RELOAD_                (0x70000000)
+#define E2P_CMD_EPC_CMD_ERAL_          (0x60000000)
+#define E2P_CMD_EPC_CMD_ERASE_         (0x50000000)
+#define E2P_CMD_EPC_CMD_WRAL_          (0x40000000)
+#define E2P_CMD_EPC_CMD_WRITE_         (0x30000000)
+#define E2P_CMD_EPC_CMD_EWEN_          (0x20000000)
+#define E2P_CMD_EPC_CMD_EWDS_          (0x10000000)
+#define E2P_CMD_EPC_CMD_READ_          (0x00000000)
+#define E2P_CMD_EPC_TIMEOUT_           (0x00000400)
+#define E2P_CMD_EPC_DL_                        (0x00000200)
+#define E2P_CMD_EPC_ADDR_MASK_         (0x000001FF)
+
+#define E2P_DATA                       (0x044)
+#define E2P_DATA_EEPROM_DATA_MASK_     (0x000000FF)
+
+#define BOS_ATTR                       (0x050)
+#define BOS_ATTR_BLOCK_SIZE_MASK_      (0x000000FF)
+
+#define SS_ATTR                                (0x054)
+#define SS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define SS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define SS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define HS_ATTR                                (0x058)
+#define HS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define HS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define HS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define FS_ATTR                                (0x05C)
+#define FS_ATTR_POLL_INT_MASK_         (0x00FF0000)
+#define FS_ATTR_DEV_DESC_SIZE_MASK_    (0x0000FF00)
+#define FS_ATTR_CFG_BLK_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR0                          (0x060)
+#define STR_ATTR0_CFGSTR_DESC_SIZE_MASK_    (0xFF000000)
+#define STR_ATTR0_SERSTR_DESC_SIZE_MASK_    (0x00FF0000)
+#define STR_ATTR0_PRODSTR_DESC_SIZE_MASK_   (0x0000FF00)
+#define STR_ATTR0_MANUF_DESC_SIZE_MASK_     (0x000000FF)
+
+#define STR_ATTR1                          (0x064)
+#define STR_ATTR1_INTSTR_DESC_SIZE_MASK_    (0x000000FF)
+
+#define STR_FLAG_ATTR                      (0x068)
+#define STR_FLAG_ATTR_PME_FLAGS_MASK_      (0x000000FF)
+
+#define USB_CFG0                       (0x080)
+#define USB_CFG_LPM_RESPONSE_          (0x80000000)
+#define USB_CFG_LPM_CAPABILITY_                (0x40000000)
+#define USB_CFG_LPM_ENBL_SLPM_         (0x20000000)
+#define USB_CFG_HIRD_THR_MASK_         (0x1F000000)
+#define USB_CFG_HIRD_THR_960_          (0x1C000000)
+#define USB_CFG_HIRD_THR_885_          (0x1B000000)
+#define USB_CFG_HIRD_THR_810_          (0x1A000000)
+#define USB_CFG_HIRD_THR_735_          (0x19000000)
+#define USB_CFG_HIRD_THR_660_          (0x18000000)
+#define USB_CFG_HIRD_THR_585_          (0x17000000)
+#define USB_CFG_HIRD_THR_510_          (0x16000000)
+#define USB_CFG_HIRD_THR_435_          (0x15000000)
+#define USB_CFG_HIRD_THR_360_          (0x14000000)
+#define USB_CFG_HIRD_THR_285_          (0x13000000)
+#define USB_CFG_HIRD_THR_210_          (0x12000000)
+#define USB_CFG_HIRD_THR_135_          (0x11000000)
+#define USB_CFG_HIRD_THR_60_           (0x10000000)
+#define USB_CFG_MAX_BURST_BI_MASK_     (0x00F00000)
+#define USB_CFG_MAX_BURST_BO_MASK_     (0x000F0000)
+#define USB_CFG_MAX_DEV_SPEED_MASK_    (0x0000E000)
+#define USB_CFG_MAX_DEV_SPEED_SS_      (0x00008000)
+#define USB_CFG_MAX_DEV_SPEED_HS_      (0x00000000)
+#define USB_CFG_MAX_DEV_SPEED_FS_      (0x00002000)
+#define USB_CFG_PHY_BOOST_MASK_                (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_12_     (0x00000180)
+#define USB_CFG_PHY_BOOST_PLUS_8_      (0x00000100)
+#define USB_CFG_PHY_BOOST_PLUS_4_      (0x00000080)
+#define USB_CFG_PHY_BOOST_NORMAL_      (0x00000000)
+#define USB_CFG_BIR_                   (0x00000040)
+#define USB_CFG_BCE_                   (0x00000020)
+#define USB_CFG_PORT_SWAP_             (0x00000010)
+#define USB_CFG_LPM_EN_                        (0x00000008)
+#define USB_CFG_RMT_WKP_               (0x00000004)
+#define USB_CFG_PWR_SEL_               (0x00000002)
+#define USB_CFG_STALL_BO_DIS_          (0x00000001)
+
+#define USB_CFG1                       (0x084)
+#define USB_CFG1_U1_TIMEOUT_MASK_      (0xFF000000)
+#define USB_CFG1_U2_TIMEOUT_MASK_      (0x00FF0000)
+#define USB_CFG1_HS_TOUT_CAL_MASK_     (0x0000E000)
+#define USB_CFG1_DEV_U2_INIT_EN_       (0x00001000)
+#define USB_CFG1_DEV_U2_EN_            (0x00000800)
+#define USB_CFG1_DEV_U1_INIT_EN_       (0x00000400)
+#define USB_CFG1_DEV_U1_EN_            (0x00000200)
+#define USB_CFG1_LTM_ENABLE_           (0x00000100)
+#define USB_CFG1_FS_TOUT_CAL_MASK_     (0x00000070)
+#define USB_CFG1_SCALE_DOWN_MASK_      (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE3_     (0x00000003)
+#define USB_CFG1_SCALE_DOWN_MODE2_     (0x00000002)
+#define USB_CFG1_SCALE_DOWN_MODE1_     (0x00000001)
+#define USB_CFG1_SCALE_DOWN_MODE0_     (0x00000000)
+
+#define USB_CFG2                           (0x088)
+#define USB_CFG2_SS_DETACH_TIME_MASK_      (0xFFFF0000)
+#define USB_CFG2_HS_DETACH_TIME_MASK_      (0x0000FFFF)
+
+#define BURST_CAP                      (0x090)
+#define BURST_CAP_SIZE_MASK_           (0x000000FF)
+
+#define BULK_IN_DLY                    (0x094)
+#define BULK_IN_DLY_MASK_              (0x0000FFFF)
+
+#define INT_EP_CTL                     (0x098)
+#define INT_EP_INTEP_ON_               (0x80000000)
+#define INT_STS_EEE_TX_LPI_STRT_EN_    (0x04000000)
+#define INT_STS_EEE_TX_LPI_STOP_EN_    (0x02000000)
+#define INT_STS_EEE_RX_LPI_EN_         (0x01000000)
+#define INT_EP_RDFO_EN_                        (0x00400000)
+#define INT_EP_TXE_EN_                 (0x00200000)
+#define INT_EP_TX_DIS_EN_              (0x00080000)
+#define INT_EP_RX_DIS_EN_              (0x00040000)
+#define INT_EP_PHY_INT_EN_             (0x00020000)
+#define INT_EP_DP_INT_EN_              (0x00010000)
+#define INT_EP_MAC_ERR_EN_             (0x00008000)
+#define INT_EP_TDFU_EN_                        (0x00004000)
+#define INT_EP_TDFO_EN_                        (0x00002000)
+#define INT_EP_UTX_FP_EN_              (0x00001000)
+#define INT_EP_GPIO_EN_MASK_           (0x00000FFF)
+
+#define PIPE_CTL                       (0x09C)
+#define PIPE_CTL_TXSWING_              (0x00000040)
+#define PIPE_CTL_TXMARGIN_MASK_                (0x00000038)
+#define PIPE_CTL_TXDEEMPHASIS_MASK_    (0x00000006)
+#define PIPE_CTL_ELASTICITYBUFFERMODE_ (0x00000001)
+
+#define U1_LATENCY                     (0xA0)
+#define U2_LATENCY                     (0xA4)
+
+#define USB_STATUS                     (0x0A8)
+#define USB_STATUS_REMOTE_WK_          (0x00100000)
+#define USB_STATUS_FUNC_REMOTE_WK_     (0x00080000)
+#define USB_STATUS_LTM_ENABLE_         (0x00040000)
+#define USB_STATUS_U2_ENABLE_          (0x00020000)
+#define USB_STATUS_U1_ENABLE_          (0x00010000)
+#define USB_STATUS_SET_SEL_            (0x00000020)
+#define USB_STATUS_REMOTE_WK_STS_      (0x00000010)
+#define USB_STATUS_FUNC_REMOTE_WK_STS_ (0x00000008)
+#define USB_STATUS_LTM_ENABLE_STS_     (0x00000004)
+#define USB_STATUS_U2_ENABLE_STS_      (0x00000002)
+#define USB_STATUS_U1_ENABLE_STS_      (0x00000001)
+
+#define USB_CFG3                       (0x0AC)
+#define USB_CFG3_EN_U2_LTM_            (0x40000000)
+#define USB_CFG3_BULK_OUT_NUMP_OVR_    (0x20000000)
+#define USB_CFG3_DIS_FAST_U1_EXIT_     (0x10000000)
+#define USB_CFG3_LPM_NYET_THR_         (0x0F000000)
+#define USB_CFG3_RX_DET_2_POL_LFPS_    (0x00800000)
+#define USB_CFG3_LFPS_FILT_            (0x00400000)
+#define USB_CFG3_SKIP_RX_DET_          (0x00200000)
+#define USB_CFG3_DELAY_P1P2P3_         (0x001C0000)
+#define USB_CFG3_DELAY_PHY_PWR_CHG_    (0x00020000)
+#define USB_CFG3_U1U2_EXIT_FR_         (0x00010000)
+#define USB_CFG3_REQ_P1P2P3            (0x00008000)
+#define USB_CFG3_HST_PRT_CMPL_         (0x00004000)
+#define USB_CFG3_DIS_SCRAMB_           (0x00002000)
+#define USB_CFG3_PWR_DN_SCALE_         (0x00001FFF)
+
+#define RFE_CTL                                (0x0B0)
+#define RFE_CTL_IGMP_COE_              (0x00004000)
+#define RFE_CTL_ICMP_COE_              (0x00002000)
+#define RFE_CTL_TCPUDP_COE_            (0x00001000)
+#define RFE_CTL_IP_COE_                        (0x00000800)
+#define RFE_CTL_BCAST_EN_              (0x00000400)
+#define RFE_CTL_MCAST_EN_              (0x00000200)
+#define RFE_CTL_UCAST_EN_              (0x00000100)
+#define RFE_CTL_VLAN_STRIP_            (0x00000080)
+#define RFE_CTL_DISCARD_UNTAGGED_      (0x00000040)
+#define RFE_CTL_VLAN_FILTER_           (0x00000020)
+#define RFE_CTL_SA_FILTER_             (0x00000010)
+#define RFE_CTL_MCAST_HASH_            (0x00000008)
+#define RFE_CTL_DA_HASH_               (0x00000004)
+#define RFE_CTL_DA_PERFECT_            (0x00000002)
+#define RFE_CTL_RST_                   (0x00000001)
+
+#define VLAN_TYPE                      (0x0B4)
+#define VLAN_TYPE_MASK_                        (0x0000FFFF)
+
+#define FCT_RX_CTL                     (0x0C0)
+#define FCT_RX_CTL_EN_                 (0x80000000)
+#define FCT_RX_CTL_RST_                        (0x40000000)
+#define FCT_RX_CTL_SBF_                        (0x02000000)
+#define FCT_RX_CTL_OVFL_               (0x01000000)
+#define FCT_RX_CTL_DROP_               (0x00800000)
+#define FCT_RX_CTL_NOT_EMPTY_          (0x00400000)
+#define FCT_RX_CTL_EMPTY_              (0x00200000)
+#define FCT_RX_CTL_DIS_                        (0x00100000)
+#define FCT_RX_CTL_USED_MASK_          (0x0000FFFF)
+
+#define FCT_TX_CTL                     (0x0C4)
+#define FCT_TX_CTL_EN_                 (0x80000000)
+#define FCT_TX_CTL_RST_                        (0x40000000)
+#define FCT_TX_CTL_NOT_EMPTY_          (0x00400000)
+#define FCT_TX_CTL_EMPTY_              (0x00200000)
+#define FCT_TX_CTL_DIS_                        (0x00100000)
+#define FCT_TX_CTL_USED_MASK_          (0x0000FFFF)
+
+#define FCT_RX_FIFO_END                        (0x0C8)
+#define FCT_RX_FIFO_END_MASK_          (0x0000007F)
+
+#define FCT_TX_FIFO_END                        (0x0CC)
+#define FCT_TX_FIFO_END_MASK_          (0x0000003F)
+
+#define FCT_FLOW                       (0x0D0)
+#define FCT_FLOW_OFF_MASK_             (0x00007F00)
+#define FCT_FLOW_ON_MASK_              (0x0000007F)
+
+#define RX_DP_STOR                     (0x0D4)
+#define RX_DP_STORE_TOT_RXUSED_MASK_   (0xFFFF0000)
+#define RX_DP_STORE_UTX_RXUSED_MASK_   (0x0000FFFF)
+
+#define TX_DP_STOR                     (0x0D8)
+#define TX_DP_STORE_TOT_TXUSED_MASK_   (0xFFFF0000)
+#define TX_DP_STORE_URX_TXUSED_MASK_   (0x0000FFFF)
+
+#define LTM_BELT_IDLE0                 (0x0E0)
+#define LTM_BELT_IDLE0_IDLE1000_       (0x0FFF0000)
+#define LTM_BELT_IDLE0_IDLE100_                (0x00000FFF)
+
+#define LTM_BELT_IDLE1                 (0x0E4)
+#define LTM_BELT_IDLE1_IDLE10_         (0x00000FFF)
+
+#define LTM_BELT_ACT0                  (0x0E8)
+#define LTM_BELT_ACT0_ACT1000_         (0x0FFF0000)
+#define LTM_BELT_ACT0_ACT100_          (0x00000FFF)
+
+#define LTM_BELT_ACT1                  (0x0EC)
+#define LTM_BELT_ACT1_ACT10_           (0x00000FFF)
+
+#define LTM_INACTIVE0                  (0x0F0)
+#define LTM_INACTIVE0_TIMER1000_       (0xFFFF0000)
+#define LTM_INACTIVE0_TIMER100_                (0x0000FFFF)
+
+#define LTM_INACTIVE1                  (0x0F4)
+#define LTM_INACTIVE1_TIMER10_         (0x0000FFFF)
+
+#define MAC_CR                         (0x100)
+#define MAC_CR_GMII_EN_                        (0x00080000)
+#define MAC_CR_EEE_TX_CLK_STOP_EN_     (0x00040000)
+#define MAC_CR_EEE_EN_                 (0x00020000)
+#define MAC_CR_EEE_TLAR_EN_            (0x00010000)
+#define MAC_CR_ADP_                    (0x00002000)
+#define MAC_CR_AUTO_DUPLEX_            (0x00001000)
+#define MAC_CR_AUTO_SPEED_             (0x00000800)
+#define MAC_CR_LOOPBACK_               (0x00000400)
+#define MAC_CR_BOLMT_MASK_             (0x000000C0)
+#define MAC_CR_FULL_DUPLEX_            (0x00000008)
+#define MAC_CR_SPEED_MASK_             (0x00000006)
+#define MAC_CR_SPEED_1000_             (0x00000004)
+#define MAC_CR_SPEED_100_              (0x00000002)
+#define MAC_CR_SPEED_10_               (0x00000000)
+#define MAC_CR_RST_                    (0x00000001)
+
+#define MAC_RX                         (0x104)
+#define MAC_RX_MAX_SIZE_SHIFT_         (16)
+#define MAC_RX_MAX_SIZE_MASK_          (0x3FFF0000)
+#define MAC_RX_FCS_STRIP_              (0x00000010)
+#define MAC_RX_VLAN_FSE_               (0x00000004)
+#define MAC_RX_RXD_                    (0x00000002)
+#define MAC_RX_RXEN_                   (0x00000001)
+
+#define MAC_TX                         (0x108)
+#define MAC_TX_BAD_FCS_                        (0x00000004)
+#define MAC_TX_TXD_                    (0x00000002)
+#define MAC_TX_TXEN_                   (0x00000001)
+
+#define FLOW                           (0x10C)
+#define FLOW_CR_FORCE_FC_              (0x80000000)
+#define FLOW_CR_TX_FCEN_               (0x40000000)
+#define FLOW_CR_RX_FCEN_               (0x20000000)
+#define FLOW_CR_FPF_                   (0x10000000)
+#define FLOW_CR_FCPT_MASK_             (0x0000FFFF)
+
+#define RAND_SEED                      (0x110)
+#define RAND_SEED_MASK_                        (0x0000FFFF)
+
+#define ERR_STS                                (0x114)
+#define ERR_STS_FERR_                  (0x00000100)
+#define ERR_STS_LERR_                  (0x00000080)
+#define ERR_STS_RFERR_                 (0x00000040)
+#define ERR_STS_ECERR_                 (0x00000010)
+#define ERR_STS_ALERR_                 (0x00000008)
+#define ERR_STS_URERR_                 (0x00000004)
+
+#define RX_ADDRH                       (0x118)
+#define RX_ADDRH_MASK_                 (0x0000FFFF)
+
+#define RX_ADDRL                       (0x11C)
+#define RX_ADDRL_MASK_                 (0xFFFFFFFF)
+
+#define MII_ACC                                (0x120)
+#define MII_ACC_PHY_ADDR_SHIFT_                (11)
+#define MII_ACC_PHY_ADDR_MASK_         (0x0000F800)
+#define MII_ACC_MIIRINDA_SHIFT_                (6)
+#define MII_ACC_MIIRINDA_MASK_         (0x000007C0)
+#define MII_ACC_MII_READ_              (0x00000000)
+#define MII_ACC_MII_WRITE_             (0x00000002)
+#define MII_ACC_MII_BUSY_              (0x00000001)
+
+#define MII_DATA                       (0x124)
+#define MII_DATA_MASK_                 (0x0000FFFF)
+
+#define MAC_RGMII_ID                   (0x128)
+#define MAC_RGMII_ID_TXC_DELAY_EN_     (0x00000002)
+#define MAC_RGMII_ID_RXC_DELAY_EN_     (0x00000001)
+
+#define EEE_TX_LPI_REQ_DLY             (0x130)
+#define EEE_TX_LPI_REQ_DLY_CNT_MASK_   (0xFFFFFFFF)
+
+#define EEE_TW_TX_SYS                  (0x134)
+#define EEE_TW_TX_SYS_CNT1G_MASK_      (0xFFFF0000)
+#define EEE_TW_TX_SYS_CNT100M_MASK_    (0x0000FFFF)
+
+#define EEE_TX_LPI_REM_DLY             (0x138)
+#define EEE_TX_LPI_REM_DLY_CNT_                (0x00FFFFFF)
+
+#define WUCSR                          (0x140)
+#define WUCSR_TESTMODE_                        (0x80000000)
+#define WUCSR_RFE_WAKE_EN_             (0x00004000)
+#define WUCSR_EEE_TX_WAKE_             (0x00002000)
+#define WUCSR_EEE_TX_WAKE_EN_          (0x00001000)
+#define WUCSR_EEE_RX_WAKE_             (0x00000800)
+#define WUCSR_EEE_RX_WAKE_EN_          (0x00000400)
+#define WUCSR_RFE_WAKE_FR_             (0x00000200)
+#define WUCSR_STORE_WAKE_              (0x00000100)
+#define WUCSR_PFDA_FR_                 (0x00000080)
+#define WUCSR_WUFR_                    (0x00000040)
+#define WUCSR_MPR_                     (0x00000020)
+#define WUCSR_BCST_FR_                 (0x00000010)
+#define WUCSR_PFDA_EN_                 (0x00000008)
+#define WUCSR_WAKE_EN_                 (0x00000004)
+#define WUCSR_MPEN_                    (0x00000002)
+#define WUCSR_BCST_EN_                 (0x00000001)
+
+#define WK_SRC                         (0x144)
+#define WK_SRC_GPIOX_INT_WK_SHIFT_     (20)
+#define WK_SRC_GPIOX_INT_WK_MASK_      (0xFFF00000)
+#define WK_SRC_IPV6_TCPSYN_RCD_WK_     (0x00010000)
+#define WK_SRC_IPV4_TCPSYN_RCD_WK_     (0x00008000)
+#define WK_SRC_EEE_TX_WK_              (0x00004000)
+#define WK_SRC_EEE_RX_WK_              (0x00002000)
+#define WK_SRC_GOOD_FR_WK_             (0x00001000)
+#define WK_SRC_PFDA_FR_WK_             (0x00000800)
+#define WK_SRC_MP_FR_WK_               (0x00000400)
+#define WK_SRC_BCAST_FR_WK_            (0x00000200)
+#define WK_SRC_WU_FR_WK_               (0x00000100)
+#define WK_SRC_WUFF_MATCH_MASK_                (0x0000001F)
+
+#define WUF_CFG0                       (0x150)
+#define NUM_OF_WUF_CFG                 (32)
+#define WUF_CFG_BEGIN                  (WUF_CFG0)
+#define WUF_CFG(index)                 (WUF_CFG_BEGIN + (4 * (index)))
+#define WUF_CFGX_EN_                   (0x80000000)
+#define WUF_CFGX_TYPE_MASK_            (0x03000000)
+#define WUF_CFGX_TYPE_MCAST_           (0x02000000)
+#define WUF_CFGX_TYPE_ALL_             (0x01000000)
+#define WUF_CFGX_TYPE_UCAST_           (0x00000000)
+#define WUF_CFGX_OFFSET_SHIFT_         (16)
+#define WUF_CFGX_OFFSET_MASK_          (0x00FF0000)
+#define WUF_CFGX_CRC16_MASK_           (0x0000FFFF)
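+/* e.g. WUF_CFG(3) = WUF_CFG_BEGIN + 4 * 3 = 0x15C (fourth wakeup-filter config register) */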
+
+#define WUF_MASK0_0                    (0x200)
+#define WUF_MASK0_1                    (0x204)
+#define WUF_MASK0_2                    (0x208)
+#define WUF_MASK0_3                    (0x20C)
+#define NUM_OF_WUF_MASK                        (32)
+#define WUF_MASK0_BEGIN                        (WUF_MASK0_0)
+#define WUF_MASK1_BEGIN                        (WUF_MASK0_1)
+#define WUF_MASK2_BEGIN                        (WUF_MASK0_2)
+#define WUF_MASK3_BEGIN                        (WUF_MASK0_3)
+#define WUF_MASK0(index)               (WUF_MASK0_BEGIN + (0x10 * (index)))
+#define WUF_MASK1(index)               (WUF_MASK1_BEGIN + (0x10 * (index)))
+#define WUF_MASK2(index)               (WUF_MASK2_BEGIN + (0x10 * (index)))
+#define WUF_MASK3(index)               (WUF_MASK3_BEGIN + (0x10 * (index)))
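+/* e.g. WUF_MASK0(2) = 0x200 + 0x10 * 2 = 0x220 and WUF_MASK3(2) = 0x20C + 0x10 * 2 = 0x22C */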
+
+#define MAF_BASE                       (0x400)
+#define MAF_HIX                                (0x00)
+#define MAF_LOX                                (0x04)
+#define NUM_OF_MAF                     (33)
+#define MAF_HI_BEGIN                   (MAF_BASE + MAF_HIX)
+#define MAF_LO_BEGIN                   (MAF_BASE + MAF_LOX)
+#define MAF_HI(index)                  (MAF_BASE + (8 * (index)) + (MAF_HIX))
+#define MAF_LO(index)                  (MAF_BASE + (8 * (index)) + (MAF_LOX))
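+/* e.g. MAF_HI(1) = 0x400 + 8 * 1 + 0x00 = 0x408, MAF_LO(1) = 0x400 + 8 * 1 + 0x04 = 0x40C */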
+#define MAF_HI_VALID_                  (0x80000000)
+#define MAF_HI_TYPE_MASK_              (0x40000000)
+#define MAF_HI_TYPE_SRC_               (0x40000000)
+#define MAF_HI_TYPE_DST_               (0x00000000)
+#define MAF_HI_ADDR_MASK               (0x0000FFFF)
+#define MAF_LO_ADDR_MASK               (0xFFFFFFFF)
+
+#define WUCSR2                         (0x600)
+#define WUCSR2_CSUM_DISABLE_           (0x80000000)
+#define WUCSR2_NA_SA_SEL_              (0x00000100)
+#define WUCSR2_NS_RCD_                 (0x00000080)
+#define WUCSR2_ARP_RCD_                        (0x00000040)
+#define WUCSR2_IPV6_TCPSYN_RCD_                (0x00000020)
+#define WUCSR2_IPV4_TCPSYN_RCD_                (0x00000010)
+#define WUCSR2_NS_OFFLOAD_EN_          (0x00000008)
+#define WUCSR2_ARP_OFFLOAD_EN_         (0x00000004)
+#define WUCSR2_IPV6_TCPSYN_WAKE_EN_    (0x00000002)
+#define WUCSR2_IPV4_TCPSYN_WAKE_EN_    (0x00000001)
+
+#define NS1_IPV6_ADDR_DEST0            (0x610)
+#define NS1_IPV6_ADDR_DEST1            (0x614)
+#define NS1_IPV6_ADDR_DEST2            (0x618)
+#define NS1_IPV6_ADDR_DEST3            (0x61C)
+
+#define NS1_IPV6_ADDR_SRC0             (0x620)
+#define NS1_IPV6_ADDR_SRC1             (0x624)
+#define NS1_IPV6_ADDR_SRC2             (0x628)
+#define NS1_IPV6_ADDR_SRC3             (0x62C)
+
+#define NS1_ICMPV6_ADDR0_0             (0x630)
+#define NS1_ICMPV6_ADDR0_1             (0x634)
+#define NS1_ICMPV6_ADDR0_2             (0x638)
+#define NS1_ICMPV6_ADDR0_3             (0x63C)
+
+#define NS1_ICMPV6_ADDR1_0             (0x640)
+#define NS1_ICMPV6_ADDR1_1             (0x644)
+#define NS1_ICMPV6_ADDR1_2             (0x648)
+#define NS1_ICMPV6_ADDR1_3             (0x64C)
+
+#define NS2_IPV6_ADDR_DEST0            (0x650)
+#define NS2_IPV6_ADDR_DEST1            (0x654)
+#define NS2_IPV6_ADDR_DEST2            (0x658)
+#define NS2_IPV6_ADDR_DEST3            (0x65C)
+
+#define NS2_IPV6_ADDR_SRC0             (0x660)
+#define NS2_IPV6_ADDR_SRC1             (0x664)
+#define NS2_IPV6_ADDR_SRC2             (0x668)
+#define NS2_IPV6_ADDR_SRC3             (0x66C)
+
+#define NS2_ICMPV6_ADDR0_0             (0x670)
+#define NS2_ICMPV6_ADDR0_1             (0x674)
+#define NS2_ICMPV6_ADDR0_2             (0x678)
+#define NS2_ICMPV6_ADDR0_3             (0x67C)
+
+#define NS2_ICMPV6_ADDR1_0             (0x680)
+#define NS2_ICMPV6_ADDR1_1             (0x684)
+#define NS2_ICMPV6_ADDR1_2             (0x688)
+#define NS2_ICMPV6_ADDR1_3             (0x68C)
+
+#define SYN_IPV4_ADDR_SRC              (0x690)
+#define SYN_IPV4_ADDR_DEST             (0x694)
+#define SYN_IPV4_TCP_PORTS             (0x698)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV4_TCP_PORTS_IPV4_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV4_TCP_PORTS_IPV4_SRC_PORT_MASK_     (0x0000FFFF)
+
+#define SYN_IPV6_ADDR_SRC0             (0x69C)
+#define SYN_IPV6_ADDR_SRC1             (0x6A0)
+#define SYN_IPV6_ADDR_SRC2             (0x6A4)
+#define SYN_IPV6_ADDR_SRC3             (0x6A8)
+
+#define SYN_IPV6_ADDR_DEST0            (0x6AC)
+#define SYN_IPV6_ADDR_DEST1            (0x6B0)
+#define SYN_IPV6_ADDR_DEST2            (0x6B4)
+#define SYN_IPV6_ADDR_DEST3            (0x6B8)
+
+#define SYN_IPV6_TCP_PORTS             (0x6BC)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_SHIFT_    (16)
+#define SYN_IPV6_TCP_PORTS_IPV6_DEST_PORT_MASK_     (0xFFFF0000)
+#define SYN_IPV6_TCP_PORTS_IPV6_SRC_PORT_MASK_     (0x0000FFFF)
+
+#define ARP_SPA                                (0x6C0)
+#define ARP_TPA                                (0x6C4)
+
+#define PHY_DEV_ID                     (0x700)
+#define PHY_DEV_ID_REV_SHIFT_          (28)
+#define PHY_DEV_ID_REV_MASK_           (0xF0000000)
+#define PHY_DEV_ID_MODEL_SHIFT_                (22)
+#define PHY_DEV_ID_MODEL_MASK_         (0x0FC00000)
+#define PHY_DEV_ID_OUI_MASK_           (0x003FFFFF)
+
+#define OTP_BASE_ADDR                  (0x00001000)
+#define OTP_ADDR_RANGE_                        (0x1FF)
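+/* OTP registers are spaced 4 bytes apart: e.g. OTP_ADDR1 = OTP_BASE_ADDR + 4 * 0x01 = 0x1004 */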
+
+#define OTP_PWR_DN                     (OTP_BASE_ADDR + 4 * 0x00)
+#define OTP_PWR_DN_PWRDN_N_            (0x01)
+
+#define OTP_ADDR1                      (OTP_BASE_ADDR + 4 * 0x01)
+#define OTP_ADDR1_15_11                        (0x1F)
+
+#define OTP_ADDR2                      (OTP_BASE_ADDR + 4 * 0x02)
+#define OTP_ADDR2_10_3                 (0xFF)
+
+#define OTP_ADDR3                      (OTP_BASE_ADDR + 4 * 0x03)
+#define OTP_ADDR3_2_0                  (0x03)
+
+#define OTP_PRGM_DATA                  (OTP_BASE_ADDR + 4 * 0x04)
+
+#define OTP_PRGM_MODE                  (OTP_BASE_ADDR + 4 * 0x05)
+#define OTP_PRGM_MODE_BYTE_            (0x01)
+
+#define OTP_RD_DATA                    (OTP_BASE_ADDR + 4 * 0x06)
+
+#define OTP_FUNC_CMD                   (OTP_BASE_ADDR + 4 * 0x08)
+#define OTP_FUNC_CMD_RESET_            (0x04)
+#define OTP_FUNC_CMD_PROGRAM_          (0x02)
+#define OTP_FUNC_CMD_READ_             (0x01)
+
+#define OTP_TST_CMD                    (OTP_BASE_ADDR + 4 * 0x09)
+#define OTP_TST_CMD_TEST_DEC_SEL_      (0x10)
+#define OTP_TST_CMD_PRGVRFY_           (0x08)
+#define OTP_TST_CMD_WRTEST_            (0x04)
+#define OTP_TST_CMD_TESTDEC_           (0x02)
+#define OTP_TST_CMD_BLANKCHECK_                (0x01)
+
+#define OTP_CMD_GO                     (OTP_BASE_ADDR + 4 * 0x0A)
+#define OTP_CMD_GO_GO_                 (0x01)
+
+#define OTP_PASS_FAIL                  (OTP_BASE_ADDR + 4 * 0x0B)
+#define OTP_PASS_FAIL_PASS_            (0x02)
+#define OTP_PASS_FAIL_FAIL_            (0x01)
+
+#define OTP_STATUS                     (OTP_BASE_ADDR + 4 * 0x0C)
+#define OTP_STATUS_OTP_LOCK_           (0x10)
+#define OTP_STATUS_WEB_                        (0x08)
+#define OTP_STATUS_PGMEN               (0x04)
+#define OTP_STATUS_CPUMPEN_            (0x02)
+#define OTP_STATUS_BUSY_               (0x01)
+
+#define OTP_MAX_PRG                    (OTP_BASE_ADDR + 4 * 0x0D)
+#define OTP_MAX_PRG_MAX_PROG           (0x1F)
+
+#define OTP_INTR_STATUS                        (OTP_BASE_ADDR + 4 * 0x10)
+#define OTP_INTR_STATUS_READY_         (0x01)
+
+#define OTP_INTR_MASK                  (OTP_BASE_ADDR + 4 * 0x11)
+#define OTP_INTR_MASK_READY_           (0x01)
+
+#define OTP_RSTB_PW1                   (OTP_BASE_ADDR + 4 * 0x14)
+#define OTP_RSTB_PW2                   (OTP_BASE_ADDR + 4 * 0x15)
+#define OTP_PGM_PW1                    (OTP_BASE_ADDR + 4 * 0x18)
+#define OTP_PGM_PW2                    (OTP_BASE_ADDR + 4 * 0x19)
+#define OTP_READ_PW1                   (OTP_BASE_ADDR + 4 * 0x1C)
+#define OTP_READ_PW2                   (OTP_BASE_ADDR + 4 * 0x1D)
+#define OTP_TCRST                      (OTP_BASE_ADDR + 4 * 0x20)
+#define OTP_RSRD                       (OTP_BASE_ADDR + 4 * 0x21)
+#define OTP_TREADEN_VAL                        (OTP_BASE_ADDR + 4 * 0x22)
+#define OTP_TDLES_VAL                  (OTP_BASE_ADDR + 4 * 0x23)
+#define OTP_TWWL_VAL                   (OTP_BASE_ADDR + 4 * 0x24)
+#define OTP_TDLEH_VAL                  (OTP_BASE_ADDR + 4 * 0x25)
+#define OTP_TWPED_VAL                  (OTP_BASE_ADDR + 4 * 0x26)
+#define OTP_TPES_VAL                   (OTP_BASE_ADDR + 4 * 0x27)
+#define OTP_TCPS_VAL                   (OTP_BASE_ADDR + 4 * 0x28)
+#define OTP_TCPH_VAL                   (OTP_BASE_ADDR + 4 * 0x29)
+#define OTP_TPGMVFY_VAL                        (OTP_BASE_ADDR + 4 * 0x2A)
+#define OTP_TPEH_VAL                   (OTP_BASE_ADDR + 4 * 0x2B)
+#define OTP_TPGRST_VAL                 (OTP_BASE_ADDR + 4 * 0x2C)
+#define OTP_TCLES_VAL                  (OTP_BASE_ADDR + 4 * 0x2D)
+#define OTP_TCLEH_VAL                  (OTP_BASE_ADDR + 4 * 0x2E)
+#define OTP_TRDES_VAL                  (OTP_BASE_ADDR + 4 * 0x2F)
+#define OTP_TBCACC_VAL                 (OTP_BASE_ADDR + 4 * 0x30)
+#define OTP_TAAC_VAL                   (OTP_BASE_ADDR + 4 * 0x31)
+#define OTP_TACCT_VAL                  (OTP_BASE_ADDR + 4 * 0x32)
+#define OTP_TRDEP_VAL                  (OTP_BASE_ADDR + 4 * 0x38)
+#define OTP_TPGSV_VAL                  (OTP_BASE_ADDR + 4 * 0x39)
+#define OTP_TPVSR_VAL                  (OTP_BASE_ADDR + 4 * 0x3A)
+#define OTP_TPVHR_VAL                  (OTP_BASE_ADDR + 4 * 0x3B)
+#define OTP_TPVSA_VAL                  (OTP_BASE_ADDR + 4 * 0x3C)
+
+#define PHY_ID1                                (0x02)
+#define PHY_ID2                                (0x03)
+
+#define PHY_DEV_ID_OUI_VTSE            (0x04001C)
+#define PHY_DEV_ID_MODEL_VTSE_8502     (0x23)
+
+#define PHY_AUTONEG_ADV                        (0x04)
+#define NWAY_AR_NEXT_PAGE_             (0x8000)
+#define NWAY_AR_REMOTE_FAULT_          (0x2000)
+#define NWAY_AR_ASM_DIR_               (0x0800)
+#define NWAY_AR_PAUSE_                 (0x0400)
+#define NWAY_AR_100T4_CAPS_            (0x0200)
+#define NWAY_AR_100TX_FD_CAPS_         (0x0100)
+#define NWAY_AR_SELECTOR_FIELD_                (0x001F)
+#define NWAY_AR_100TX_HD_CAPS_         (0x0080)
+#define NWAY_AR_10T_FD_CAPS_           (0x0040)
+#define NWAY_AR_10T_HD_CAPS_           (0x0020)
+#define NWAY_AR_ALL_CAPS_              (NWAY_AR_10T_HD_CAPS_ | \
+                                        NWAY_AR_10T_FD_CAPS_ | \
+                                        NWAY_AR_100TX_HD_CAPS_ | \
+                                        NWAY_AR_100TX_FD_CAPS_)
+#define NWAY_AR_PAUSE_MASK             (NWAY_AR_PAUSE_ | NWAY_AR_ASM_DIR_)
+
+#define PHY_LP_ABILITY                 (0x05)
+#define NWAY_LPAR_NEXT_PAGE_           (0x8000)
+#define NWAY_LPAR_ACKNOWLEDGE_         (0x4000)
+#define NWAY_LPAR_REMOTE_FAULT_                (0x2000)
+#define NWAY_LPAR_ASM_DIR_             (0x0800)
+#define NWAY_LPAR_PAUSE_               (0x0400)
+#define NWAY_LPAR_100T4_CAPS_          (0x0200)
+#define NWAY_LPAR_100TX_FD_CAPS_       (0x0100)
+#define NWAY_LPAR_100TX_HD_CAPS_       (0x0080)
+#define NWAY_LPAR_10T_FD_CAPS_         (0x0040)
+#define NWAY_LPAR_10T_HD_CAPS_         (0x0020)
+#define NWAY_LPAR_SELECTOR_FIELD_      (0x001F)
+
+#define PHY_AUTONEG_EXP                        (0x06)
+#define NWAY_ER_PAR_DETECT_FAULT_      (0x0010)
+#define NWAY_ER_LP_NEXT_PAGE_CAPS_     (0x0008)
+#define NWAY_ER_NEXT_PAGE_CAPS_                (0x0004)
+#define NWAY_ER_PAGE_RXD_              (0x0002)
+#define NWAY_ER_LP_NWAY_CAPS_          (0x0001)
+
+#define PHY_NEXT_PAGE_TX               (0x07)
+#define NPTX_NEXT_PAGE_                        (0x8000)
+#define NPTX_MSG_PAGE_                 (0x2000)
+#define NPTX_ACKNOWLDGE2_              (0x1000)
+#define NPTX_TOGGLE_                   (0x0800)
+#define NPTX_MSG_CODE_FIELD_           (0x0001)
+
+#define PHY_LP_NEXT_PAGE               (0x08)
+#define LP_RNPR_NEXT_PAGE_             (0x8000)
+#define LP_RNPR_ACKNOWLDGE_            (0x4000)
+#define LP_RNPR_MSG_PAGE_              (0x2000)
+#define LP_RNPR_ACKNOWLDGE2_           (0x1000)
+#define LP_RNPR_TOGGLE_                        (0x0800)
+#define LP_RNPR_MSG_CODE_FIELD_                (0x0001)
+
+#define PHY_1000T_CTRL                 (0x09)
+#define CR_1000T_TEST_MODE_4_          (0x8000)
+#define CR_1000T_TEST_MODE_3_          (0x6000)
+#define CR_1000T_TEST_MODE_2_          (0x4000)
+#define CR_1000T_TEST_MODE_1_          (0x2000)
+#define CR_1000T_MS_ENABLE_            (0x1000)
+#define CR_1000T_MS_VALUE_             (0x0800)
+#define CR_1000T_REPEATER_DTE_         (0x0400)
+#define CR_1000T_FD_CAPS_              (0x0200)
+#define CR_1000T_HD_CAPS_              (0x0100)
+#define CR_1000T_ASYM_PAUSE_           (0x0080)
+#define CR_1000T_TEST_MODE_NORMAL_     (0x0000)
+
+#define PHY_1000T_STATUS               (0x0A)
+#define SR_1000T_MS_CONFIG_FAULT_      (0x8000)
+#define SR_1000T_MS_CONFIG_RES_                (0x4000)
+#define SR_1000T_LOCAL_RX_STATUS_      (0x2000)
+#define SR_1000T_REMOTE_RX_STATUS_     (0x1000)
+#define SR_1000T_LP_FD_CAPS_           (0x0800)
+#define SR_1000T_LP_HD_CAPS_           (0x0400)
+#define SR_1000T_ASYM_PAUSE_DIR_       (0x0100)
+#define SR_1000T_IDLE_ERROR_CNT_       (0x00FF)
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT                12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT         13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT  5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20          20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100         100
+
+#define PHY_EXT_STATUS                 (0x0F)
+#define IEEE_ESR_1000X_FD_CAPS_                (0x8000)
+#define IEEE_ESR_1000X_HD_CAPS_                (0x4000)
+#define IEEE_ESR_1000T_FD_CAPS_                (0x2000)
+#define IEEE_ESR_1000T_HD_CAPS_                (0x1000)
+#define PHY_TX_POLARITY_MASK_          (0x0100)
+#define PHY_TX_NORMAL_POLARITY_                (0x0000)
+#define AUTO_POLARITY_DISABLE_         (0x0010)
+
+#define PHY_MMD_CTL                    (0x0D)
+#define PHY_MMD_CTRL_OP_MASK_          (0xC000)
+#define PHY_MMD_CTRL_OP_REG_           (0x0000)
+#define PHY_MMD_CTRL_OP_DNI_           (0x4000)
+#define PHY_MMD_CTRL_OP_DPIRW_         (0x8000)
+#define PHY_MMD_CTRL_OP_DPIWO_         (0xC000)
+#define PHY_MMD_CTRL_DEV_ADDR_MASK_    (0x001F)
+
+#define PHY_MMD_REG_DATA               (0x0E)
+
+/* VTSE Vendor Specific registers */
+#define PHY_VTSE_BYPASS                                (0x12)
+#define PHY_VTSE_BYPASS_DISABLE_PAIR_SWAP_     (0x0020)
+
+#define PHY_VTSE_INT_MASK                      (0x19)
+#define PHY_VTSE_INT_MASK_MDINTPIN_EN_         (0x8000)
+#define PHY_VTSE_INT_MASK_SPEED_CHANGE_                (0x4000)
+#define PHY_VTSE_INT_MASK_LINK_CHANGE_         (0x2000)
+#define PHY_VTSE_INT_MASK_FDX_CHANGE_          (0x1000)
+#define PHY_VTSE_INT_MASK_AUTONEG_ERR_         (0x0800)
+#define PHY_VTSE_INT_MASK_AUTONEG_DONE_                (0x0400)
+#define PHY_VTSE_INT_MASK_POE_DETECT_          (0x0200)
+#define PHY_VTSE_INT_MASK_SYMBOL_ERR_          (0x0100)
+#define PHY_VTSE_INT_MASK_FAST_LINK_FAIL_      (0x0080)
+#define PHY_VTSE_INT_MASK_WOL_EVENT_           (0x0040)
+#define PHY_VTSE_INT_MASK_EXTENDED_INT_                (0x0020)
+#define PHY_VTSE_INT_MASK_RESERVED_            (0x0010)
+#define PHY_VTSE_INT_MASK_FALSE_CARRIER_       (0x0008)
+#define PHY_VTSE_INT_MASK_LINK_SPEED_DS_       (0x0004)
+#define PHY_VTSE_INT_MASK_MASTER_SLAVE_DONE_   (0x0002)
+#define PHY_VTSE_INT_MASK_RX__ER_              (0x0001)
+
+#define PHY_VTSE_INT_STS                       (0x1A)
+#define PHY_VTSE_INT_STS_INT_ACTIVE_           (0x8000)
+#define PHY_VTSE_INT_STS_SPEED_CHANGE_         (0x4000)
+#define PHY_VTSE_INT_STS_LINK_CHANGE_          (0x2000)
+#define PHY_VTSE_INT_STS_FDX_CHANGE_           (0x1000)
+#define PHY_VTSE_INT_STS_AUTONEG_ERR_          (0x0800)
+#define PHY_VTSE_INT_STS_AUTONEG_DONE_         (0x0400)
+#define PHY_VTSE_INT_STS_POE_DETECT_           (0x0200)
+#define PHY_VTSE_INT_STS_SYMBOL_ERR_           (0x0100)
+#define PHY_VTSE_INT_STS_FAST_LINK_FAIL_       (0x0080)
+#define PHY_VTSE_INT_STS_WOL_EVENT_            (0x0040)
+#define PHY_VTSE_INT_STS_EXTENDED_INT_         (0x0020)
+#define PHY_VTSE_INT_STS_RESERVED_             (0x0010)
+#define PHY_VTSE_INT_STS_FALSE_CARRIER_                (0x0008)
+#define PHY_VTSE_INT_STS_LINK_SPEED_DS_                (0x0004)
+#define PHY_VTSE_INT_STS_MASTER_SLAVE_DONE_    (0x0002)
+#define PHY_VTSE_INT_STS_RX_ER_                        (0x0001)
+
+/* VTSE PHY registers */
+#define PHY_EXT_GPIO_PAGE              (0x1F)
+#define PHY_EXT_GPIO_PAGE_SPACE_0      (0x0000)
+#define PHY_EXT_GPIO_PAGE_SPACE_1      (0x0001)
+#define PHY_EXT_GPIO_PAGE_SPACE_2      (0x0002)
+
+/* Extended Register Page 1 space */
+#define PHY_EXT_MODE_CTRL              (0x13)
+#define PHY_EXT_MODE_CTRL_MDIX_MASK_   (0x000C)
+#define PHY_EXT_MODE_CTRL_AUTO_MDIX_   (0x0000)
+#define PHY_EXT_MODE_CTRL_MDI_         (0x0008)
+#define PHY_EXT_MODE_CTRL_MDI_X_       (0x000C)
+
+#define PHY_ANA_10BASE_T_HD            0x01
+#define PHY_ANA_10BASE_T_FD            0x02
+#define PHY_ANA_100BASE_TX_HD          0x04
+#define PHY_ANA_100BASE_TX_FD          0x08
+#define PHY_ANA_1000BASE_T_FD          0x10
+#define PHY_ANA_ALL_SUPPORTED_MEDIA    (PHY_ANA_10BASE_T_HD |   \
+                                        PHY_ANA_10BASE_T_FD |   \
+                                        PHY_ANA_100BASE_TX_HD | \
+                                        PHY_ANA_100BASE_TX_FD | \
+                                        PHY_ANA_1000BASE_T_FD)
+/* PHY MMD registers */
+#define PHY_MMD_DEV_3                          3
+
+#define PHY_EEE_PCS_STATUS                     (0x1)
+#define PHY_EEE_PCS_STATUS_TX_LPI_RCVD_                (0x0800)
+#define PHY_EEE_PCS_STATUS_RX_LPI_RCVD_                (0x0400)
+#define PHY_EEE_PCS_STATUS_TX_LPI_IND_         (0x0200)
+#define PHY_EEE_PCS_STATUS_RX_LPI_IND_         (0x0100)
+#define PHY_EEE_PCS_STATUS_PCS_RCV_LNK_STS_    (0x0004)
+
+#define PHY_EEE_CAPABILITIES                   (0x14)
+#define PHY_EEE_CAPABILITIES_1000BT_EEE_       (0x0004)
+#define PHY_EEE_CAPABILITIES_100BT_EEE_                (0x0002)
+
+#define PHY_MMD_DEV_7                          7
+
+#define PHY_EEE_ADVERTISEMENT                  (0x3C)
+#define PHY_EEE_ADVERTISEMENT_1000BT_EEE_      (0x0004)
+#define PHY_EEE_ADVERTISEMENT_100BT_EEE_       (0x0002)
+
+#define PHY_EEE_LP_ADVERTISEMENT               (0x3D)
+#define PHY_EEE_1000BT_EEE_CAPABLE_            (0x0004)
+#define PHY_EEE_100BT_EEE_CAPABLE_             (0x0002)
+#endif /* _LAN78XX_H */
index 9d43460ce3c71f0b54c69b84fa5a0ec8b7341d5f..1f7a7cd97e50277e48487e18eaeafc9406b27f46 100644 (file)
@@ -785,6 +785,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a8, 8)},    /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a9, 8)},    /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+       {QMI_FIXED_INTF(0x413c, 0x81b1, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x03f0, 0x581d, 4)},    /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
 
        /* 4. Gobi 1000 devices */
index ad8cbc6c9ee73513d7bc063ca8d41d75abce3038..fe4ec324aebc0284f3a72849dcfc6cc9187196a8 100644 (file)
 
 /* USB_USB_CTRL */
 #define RX_AGG_DISABLE         0x0010
+#define RX_ZERO_EN             0x0080
 
 /* USB_U2P3_CTRL */
 #define U2P3_ENABLE            0x0001
@@ -622,6 +623,7 @@ enum rtl_version {
        RTL_VER_03,
        RTL_VER_04,
        RTL_VER_05,
+       RTL_VER_06,
        RTL_VER_MAX
 };
 
@@ -2610,7 +2612,10 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
        u32 ocp_data;
        u16 data;
 
-       ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+       if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
+           tp->version == RTL_VER_05)
+               ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
+
        data = r8152_mdio_read(tp, MII_BMCR);
        if (data & BMCR_PDOWN) {
                data &= ~BMCR_PDOWN;
@@ -2711,7 +2716,7 @@ static void r8153_first_init(struct r8152 *tp)
 
        /* rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data &= ~RX_AGG_DISABLE;
+       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3241,7 +3246,7 @@ static void r8152b_init(struct r8152 *tp)
 
        /* enable rx aggregation */
        ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
-       ocp_data &= ~RX_AGG_DISABLE;
+       ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN);
        ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
 }
 
@@ -3287,6 +3292,13 @@ static void r8153_init(struct r8152 *tp)
                ocp_data &= ~ECM_ALDPS;
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data);
 
+               ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
+               if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
+                       ocp_data &= ~DYNAMIC_BURST;
+               else
+                       ocp_data |= DYNAMIC_BURST;
+               ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data);
+       } else if (tp->version == RTL_VER_06) {
                ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1);
                if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0)
                        ocp_data &= ~DYNAMIC_BURST;
@@ -3988,6 +4000,10 @@ static void r8152b_get_version(struct r8152 *tp)
                tp->version = RTL_VER_05;
                tp->mii.supports_gmii = 1;
                break;
+       case 0x5c30:
+               tp->version = RTL_VER_06;
+               tp->mii.supports_gmii = 1;
+               break;
        default:
                netif_info(tp, probe, tp->netdev,
                           "Unknown version 0x%04x\n", version);
@@ -4033,6 +4049,7 @@ static int rtl_ops_init(struct r8152 *tp)
        case RTL_VER_03:
        case RTL_VER_04:
        case RTL_VER_05:
+       case RTL_VER_06:
                ops->init               = r8153_init;
                ops->enable             = rtl8153_enable;
                ops->disable            = rtl8153_disable;
index 34c519eb1db5092a6e1bd17e02b9a3e53a5c5cb2..e90f7a484e1c741b5b51c04481a3d749bd6ab7bd 100644 (file)
 #include <net/ip6_tunnel.h>
 #include <net/ip6_checksum.h>
 #endif
+#include <net/dst_metadata.h>
 
 #define VXLAN_VERSION  "0.1"
 
 #define PORT_HASH_BITS 8
 #define PORT_HASH_SIZE  (1<<PORT_HASH_BITS)
-#define VNI_HASH_BITS  10
-#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS  8
-#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
 #define FDB_AGE_DEFAULT 300 /* 5 min */
 #define FDB_AGE_INTERVAL (10 * HZ)     /* rescan interval */
 
@@ -74,9 +71,13 @@ module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
 static int vxlan_net_id;
+static struct rtnl_link_ops vxlan_link_ops;
 
 static const u8 all_zeros_mac[ETH_ALEN];
 
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                        bool no_share, u32 flags);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
        struct list_head  vxlan_list;
@@ -84,21 +85,6 @@ struct vxlan_net {
        spinlock_t        sock_lock;
 };
 
-union vxlan_addr {
-       struct sockaddr_in sin;
-       struct sockaddr_in6 sin6;
-       struct sockaddr sa;
-};
-
-struct vxlan_rdst {
-       union vxlan_addr         remote_ip;
-       __be16                   remote_port;
-       u32                      remote_vni;
-       u32                      remote_ifindex;
-       struct list_head         list;
-       struct rcu_head          rcu;
-};
-
 /* Forwarding table entry */
 struct vxlan_fdb {
        struct hlist_node hlist;        /* linked list of entries */
@@ -106,40 +92,21 @@ struct vxlan_fdb {
        unsigned long     updated;      /* jiffies */
        unsigned long     used;
        struct list_head  remotes;
+       u8                eth_addr[ETH_ALEN];
        u16               state;        /* see ndm_state */
        u8                flags;        /* see ndm_flags */
-       u8                eth_addr[ETH_ALEN];
-};
-
-/* Pseudo network device */
-struct vxlan_dev {
-       struct hlist_node hlist;        /* vni hash table */
-       struct list_head  next;         /* vxlan's per namespace list */
-       struct vxlan_sock *vn_sock;     /* listening socket */
-       struct net_device *dev;
-       struct net        *net;         /* netns for packet i/o */
-       struct vxlan_rdst default_dst;  /* default destination */
-       union vxlan_addr  saddr;        /* source address */
-       __be16            dst_port;
-       __u16             port_min;     /* source port range */
-       __u16             port_max;
-       __u8              tos;          /* TOS override */
-       __u8              ttl;
-       u32               flags;        /* VXLAN_F_* in vxlan.h */
-
-       unsigned long     age_interval;
-       struct timer_list age_timer;
-       spinlock_t        hash_lock;
-       unsigned int      addrcnt;
-       unsigned int      addrmax;
-
-       struct hlist_head fdb_head[FDB_HASH_SIZE];
 };
 
 /* salt for hash table */
 static u32 vxlan_salt __read_mostly;
 static struct workqueue_struct *vxlan_wq;
 
+static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
+{
+       return vs->flags & VXLAN_F_COLLECT_METADATA ||
+              ip_tunnel_collect_metadata();
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static inline
 bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
@@ -345,7 +312,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
                goto nla_put_failure;
 
-       if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
+       if (rdst->remote_port && rdst->remote_port != vxlan->cfg.dst_port &&
            nla_put_be16(skb, NDA_PORT, rdst->remote_port))
                goto nla_put_failure;
        if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
@@ -749,7 +716,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
 
-               if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
+               if (vxlan->cfg.addrmax &&
+                   vxlan->addrcnt >= vxlan->cfg.addrmax)
                        return -ENOSPC;
 
                /* Disallow replace to add a multicast entry */
@@ -835,7 +803,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                        return -EINVAL;
                *port = nla_get_be16(tb[NDA_PORT]);
        } else {
-               *port = vxlan->dst_port;
+               *port = vxlan->cfg.dst_port;
        }
 
        if (tb[NDA_VNI]) {
@@ -1021,7 +989,7 @@ static bool vxlan_snoop(struct net_device *dev,
                        vxlan_fdb_create(vxlan, src_mac, src_ip,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
-                                        vxlan->dst_port,
+                                        vxlan->cfg.dst_port,
                                         vxlan->default_dst.remote_vni,
                                         0, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
@@ -1062,7 +1030,7 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
        return false;
 }
 
-void vxlan_sock_release(struct vxlan_sock *vs)
+static void vxlan_sock_release(struct vxlan_sock *vs)
 {
        struct sock *sk = vs->sock->sk;
        struct net *net = sock_net(sk);
@@ -1078,7 +1046,6 @@ void vxlan_sock_release(struct vxlan_sock *vs)
 
        queue_work(vxlan_wq, &vs->del_work);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_release);
 
 /* Update multicast group membership when first VNI on
  * multicast address is brought up
@@ -1161,13 +1128,112 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh,
        return vh;
 }
 
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
+                     struct vxlan_metadata *md, u32 vni,
+                     struct metadata_dst *tun_dst)
+{
+       struct iphdr *oip = NULL;
+       struct ipv6hdr *oip6 = NULL;
+       struct vxlan_dev *vxlan;
+       struct pcpu_sw_netstats *stats;
+       union vxlan_addr saddr;
+       int err = 0;
+       union vxlan_addr *remote_ip;
+
+       /* For flow based devices, map all packets to VNI 0 */
+       if (vs->flags & VXLAN_F_FLOW_BASED)
+               vni = 0;
+
+       /* Is this VNI defined? */
+       vxlan = vxlan_vs_find_vni(vs, vni);
+       if (!vxlan)
+               goto drop;
+
+       remote_ip = &vxlan->default_dst.remote_ip;
+       skb_reset_mac_header(skb);
+       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
+       skb->protocol = eth_type_trans(skb, vxlan->dev);
+       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+
+       /* Ignore packet loops (and multicast echo) */
+       if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
+               goto drop;
+
+       /* Re-examine inner Ethernet packet */
+       if (remote_ip->sa.sa_family == AF_INET) {
+               oip = ip_hdr(skb);
+               saddr.sin.sin_addr.s_addr = oip->saddr;
+               saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               oip6 = ipv6_hdr(skb);
+               saddr.sin6.sin6_addr = oip6->saddr;
+               saddr.sa.sa_family = AF_INET6;
+#endif
+       }
+
+       if (tun_dst) {
+               skb_dst_set(skb, (struct dst_entry *)tun_dst);
+               tun_dst = NULL;
+       }
+
+       if ((vxlan->flags & VXLAN_F_LEARN) &&
+           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+               goto drop;
+
+       skb_reset_network_header(skb);
+       /* In flow-based mode, GBP is carried in dst_metadata */
+       if (!(vs->flags & VXLAN_F_FLOW_BASED))
+               skb->mark = md->gbp;
+
+       if (oip6)
+               err = IP6_ECN_decapsulate(oip6, skb);
+       if (oip)
+               err = IP_ECN_decapsulate(oip, skb);
+
+       if (unlikely(err)) {
+               if (log_ecn_error) {
+                       if (oip6)
+                               net_info_ratelimited("non-ECT from %pI6\n",
+                                                    &oip6->saddr);
+                       if (oip)
+                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+                                                    &oip->saddr, oip->tos);
+               }
+               if (err > 1) {
+                       ++vxlan->dev->stats.rx_frame_errors;
+                       ++vxlan->dev->stats.rx_errors;
+                       goto drop;
+               }
+       }
+
+       stats = this_cpu_ptr(vxlan->dev->tstats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_packets++;
+       stats->rx_bytes += skb->len;
+       u64_stats_update_end(&stats->syncp);
+
+       netif_rx(skb);
+
+       return;
+drop:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
+
+       /* Consume bad packet */
+       kfree_skb(skb);
+}
+
 /* Callback from net/ipv4/udp.c to receive packets */
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
+       struct metadata_dst *tun_dst = NULL;
+       struct ip_tunnel_info *info;
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
        u32 flags, vni;
-       struct vxlan_metadata md = {0};
+       struct vxlan_metadata _md;
+       struct vxlan_metadata *md = &_md;
 
        /* Need Vxlan and inner Ethernet header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1202,6 +1268,32 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                vni &= VXLAN_VNI_MASK;
        }
 
+       if (vxlan_collect_metadata(vs)) {
+               const struct iphdr *iph = ip_hdr(skb);
+
+               tun_dst = metadata_dst_alloc(sizeof(*md), GFP_ATOMIC);
+               if (!tun_dst)
+                       goto drop;
+
+               info = &tun_dst->u.tun_info;
+               info->key.ipv4_src = iph->saddr;
+               info->key.ipv4_dst = iph->daddr;
+               info->key.ipv4_tos = iph->tos;
+               info->key.ipv4_ttl = iph->ttl;
+               info->key.tp_src = udp_hdr(skb)->source;
+               info->key.tp_dst = udp_hdr(skb)->dest;
+
+               info->mode = IP_TUNNEL_INFO_RX;
+               info->key.tun_flags = TUNNEL_KEY;
+               info->key.tun_id = cpu_to_be64(vni >> 8);
+               if (udp_hdr(skb)->check != 0)
+                       info->key.tun_flags |= TUNNEL_CSUM;
+
+               md = ip_tunnel_info_opts(info, sizeof(*md));
+       } else {
+               memset(md, 0, sizeof(*md));
+       }
+
        /* For backwards compatibility, only allow reserved fields to be
         * used by VXLAN extensions if explicitly requested.
         */
@@ -1209,13 +1301,16 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                struct vxlanhdr_gbp *gbp;
 
                gbp = (struct vxlanhdr_gbp *)vxh;
-               md.gbp = ntohs(gbp->policy_id);
+               md->gbp = ntohs(gbp->policy_id);
+
+               if (tun_dst)
+                       info->key.tun_flags |= TUNNEL_VXLAN_OPT;
 
                if (gbp->dont_learn)
-                       md.gbp |= VXLAN_GBP_DONT_LEARN;
+                       md->gbp |= VXLAN_GBP_DONT_LEARN;
 
                if (gbp->policy_applied)
-                       md.gbp |= VXLAN_GBP_POLICY_APPLIED;
+                       md->gbp |= VXLAN_GBP_POLICY_APPLIED;
 
                flags &= ~VXLAN_GBP_USED_BITS;
        }
@@ -1233,8 +1328,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                goto bad_flags;
        }
 
-       md.vni = vxh->vx_vni;
-       vs->rcv(vs, skb, &md);
+       vxlan_rcv(vs, skb, md, vni >> 8, tun_dst);
        return 0;
 
 drop:
@@ -1247,93 +1341,13 @@ bad_flags:
                   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
 
 error:
+       if (tun_dst)
+               dst_release((struct dst_entry *)tun_dst);
+
        /* Return non vxlan pkt */
        return 1;
 }
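
With metadata collection enabled, vxlan_udp_encap_recv() above attaches the outer tunnel key to the skb as a metadata dst before handing it to vxlan_rcv(). The following is a minimal, hypothetical consumer sketch (not part of this patch) showing how code running after the VXLAN receive path could read that key back, using only helpers already referenced in this diff:

/* Hypothetical consumer sketch (not part of this patch): fetch the
 * per-packet tunnel id that vxlan_udp_encap_recv() attached via
 * skb_dst_set(). Returns 0 if no RX tunnel metadata is present.
 */
static __be64 example_get_rx_tunnel_id(struct sk_buff *skb)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);

	if (!info || info->mode != IP_TUNNEL_INFO_RX)
		return 0;

	return info->key.tun_id;
}
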
 
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-                     struct vxlan_metadata *md)
-{
-       struct iphdr *oip = NULL;
-       struct ipv6hdr *oip6 = NULL;
-       struct vxlan_dev *vxlan;
-       struct pcpu_sw_netstats *stats;
-       union vxlan_addr saddr;
-       __u32 vni;
-       int err = 0;
-       union vxlan_addr *remote_ip;
-
-       vni = ntohl(md->vni) >> 8;
-       /* Is this VNI defined? */
-       vxlan = vxlan_vs_find_vni(vs, vni);
-       if (!vxlan)
-               goto drop;
-
-       remote_ip = &vxlan->default_dst.remote_ip;
-       skb_reset_mac_header(skb);
-       skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
-       skb->protocol = eth_type_trans(skb, vxlan->dev);
-       skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
-       /* Ignore packet loops (and multicast echo) */
-       if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
-               goto drop;
-
-       /* Re-examine inner Ethernet packet */
-       if (remote_ip->sa.sa_family == AF_INET) {
-               oip = ip_hdr(skb);
-               saddr.sin.sin_addr.s_addr = oip->saddr;
-               saddr.sa.sa_family = AF_INET;
-#if IS_ENABLED(CONFIG_IPV6)
-       } else {
-               oip6 = ipv6_hdr(skb);
-               saddr.sin6.sin6_addr = oip6->saddr;
-               saddr.sa.sa_family = AF_INET6;
-#endif
-       }
-
-       if ((vxlan->flags & VXLAN_F_LEARN) &&
-           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
-               goto drop;
-
-       skb_reset_network_header(skb);
-       skb->mark = md->gbp;
-
-       if (oip6)
-               err = IP6_ECN_decapsulate(oip6, skb);
-       if (oip)
-               err = IP_ECN_decapsulate(oip, skb);
-
-       if (unlikely(err)) {
-               if (log_ecn_error) {
-                       if (oip6)
-                               net_info_ratelimited("non-ECT from %pI6\n",
-                                                    &oip6->saddr);
-                       if (oip)
-                               net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
-                                                    &oip->saddr, oip->tos);
-               }
-               if (err > 1) {
-                       ++vxlan->dev->stats.rx_frame_errors;
-                       ++vxlan->dev->stats.rx_errors;
-                       goto drop;
-               }
-       }
-
-       stats = this_cpu_ptr(vxlan->dev->tstats);
-       u64_stats_update_begin(&stats->syncp);
-       stats->rx_packets++;
-       stats->rx_bytes += skb->len;
-       u64_stats_update_end(&stats->syncp);
-
-       netif_rx(skb);
-
-       return;
-drop:
-       /* Consume bad packet */
-       kfree_skb(skb);
-}
-
 static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -1672,7 +1686,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                           struct sk_buff *skb,
                           struct net_device *dev, struct in6_addr *saddr,
                           struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port,
+                          __be16 src_port, __be16 dst_port, __be32 vni,
                           struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
@@ -1722,7 +1736,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = md->vni;
+       vxh->vx_vni = vni;
 
        if (type & SKB_GSO_TUNNEL_REMCSUM) {
                u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1755,10 +1769,10 @@ err:
 }
 #endif
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port,
-                  struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+                         __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+                         __be16 src_port, __be16 dst_port, __be32 vni,
+                         struct vxlan_metadata *md, bool xnet, u32 vxflags)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
@@ -1801,7 +1815,7 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
 
        vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
        vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = md->vni;
+       vxh->vx_vni = vni;
 
        if (type & SKB_GSO_TUNNEL_REMCSUM) {
                u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
@@ -1828,7 +1842,6 @@ int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
                                   ttl, df, src_port, dst_port, xnet,
                                   !(vxflags & VXLAN_F_UDP_CSUM));
 }
-EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
@@ -1878,22 +1891,43 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                           struct vxlan_rdst *rdst, bool did_rsc)
 {
+       struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct sock *sk = vxlan->vn_sock->sock->sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
        union vxlan_addr *dst;
-       struct vxlan_metadata md;
+       union vxlan_addr remote_ip;
+       struct vxlan_metadata _md;
+       struct vxlan_metadata *md = &_md;
        __be16 src_port = 0, dst_port;
        u32 vni;
        __be16 df = 0;
        __u8 tos, ttl;
        int err;
+       u32 flags = vxlan->flags;
+
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
+       if (rdst) {
+               dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
+               vni = rdst->remote_vni;
+               dst = &rdst->remote_ip;
+       } else {
+               if (!info) {
+                       WARN_ONCE(1, "%s: Missing encapsulation instructions\n",
+                                 dev->name);
+                       goto drop;
+               }
 
-       dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
-       vni = rdst->remote_vni;
-       dst = &rdst->remote_ip;
+               dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
+               vni = be64_to_cpu(info->key.tun_id);
+               remote_ip.sin.sin_family = AF_INET;
+               remote_ip.sin.sin_addr.s_addr = info->key.ipv4_dst;
+               dst = &remote_ip;
+       }
 
        if (vxlan_addr_any(dst)) {
                if (did_rsc) {
@@ -1906,25 +1940,42 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
        old_iph = ip_hdr(skb);
 
-       ttl = vxlan->ttl;
+       ttl = vxlan->cfg.ttl;
        if (!ttl && vxlan_addr_multicast(dst))
                ttl = 1;
 
-       tos = vxlan->tos;
+       tos = vxlan->cfg.tos;
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
-                                    vxlan->port_max, true);
+       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
+                                    vxlan->cfg.port_max, true);
 
        if (dst->sa.sa_family == AF_INET) {
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+                               df = htons(IP_DF);
+                       if (info->key.tun_flags & TUNNEL_CSUM)
+                               flags |= VXLAN_F_UDP_CSUM;
+                       else
+                               flags &= ~VXLAN_F_UDP_CSUM;
+
+                       ttl = info->key.ipv4_ttl;
+                       tos = info->key.ipv4_tos;
+
+                       if (info->options_len)
+                               md = ip_tunnel_info_opts(info, sizeof(*md));
+               } else {
+                       md->gbp = skb->mark;
+               }
+
                memset(&fl4, 0, sizeof(fl4));
-               fl4.flowi4_oif = rdst->remote_ifindex;
+               fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
                fl4.flowi4_tos = RT_TOS(tos);
                fl4.flowi4_mark = skb->mark;
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.daddr = dst->sin.sin_addr.s_addr;
-               fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
+               fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
 
                rt = ip_route_output_key(vxlan->net, &fl4);
                if (IS_ERR(rt)) {
@@ -1958,14 +2009,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-               md.vni = htonl(vni << 8);
-               md.gbp = skb->mark;
-
                err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
                                     dst->sin.sin_addr.s_addr, tos, ttl, df,
-                                    src_port, dst_port, &md,
+                                    src_port, dst_port, htonl(vni << 8), md,
                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                    vxlan->flags);
+                                    flags);
                if (err < 0) {
                        /* skb is already freed. */
                        skb = NULL;
@@ -1980,13 +2028,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                u32 flags;
 
                memset(&fl6, 0, sizeof(fl6));
-               fl6.flowi6_oif = rdst->remote_ifindex;
+               fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
                fl6.daddr = dst->sin6.sin6_addr;
-               fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+               fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
                fl6.flowi6_mark = skb->mark;
                fl6.flowi6_proto = IPPROTO_UDP;
 
-               if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+               if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
                        netdev_dbg(dev, "no route to %pI6\n",
                                   &dst->sin6.sin6_addr);
                        dev->stats.tx_carrier_errors++;
@@ -2018,11 +2066,10 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                }
 
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               md.vni = htonl(vni << 8);
-               md.gbp = skb->mark;
+               md->gbp = skb->mark;
 
                err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
-                                     0, ttl, src_port, dst_port, &md,
+                                     0, ttl, src_port, dst_port, htonl(vni << 8), md,
                                      !net_eq(vxlan->net, dev_net(vxlan->dev)),
                                      vxlan->flags);
 #endif
@@ -2051,11 +2098,15 @@ tx_free:
 static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       const struct ip_tunnel_info *info;
        struct ethhdr *eth;
        bool did_rsc = false;
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
 
+       /* FIXME: Support IPv6 */
+       info = skb_tunnel_info(skb, AF_INET);
+
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);
 
@@ -2078,6 +2129,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
        }
 
+       if (vxlan->flags & VXLAN_F_FLOW_BASED &&
+           info && info->mode == IP_TUNNEL_INFO_TX) {
+               vxlan_xmit_one(skb, dev, NULL, false);
+               return NETDEV_TX_OK;
+       }
+
        f = vxlan_find_mac(vxlan, eth->h_dest);
        did_rsc = false;
 
@@ -2143,7 +2200,7 @@ static void vxlan_cleanup(unsigned long arg)
                        if (f->state & NUD_PERMANENT)
                                continue;
 
-                       timeout = f->used + vxlan->age_interval * HZ;
+                       timeout = f->used + vxlan->cfg.age_interval * HZ;
                        if (time_before_eq(timeout, jiffies)) {
                                netdev_dbg(vxlan->dev,
                                           "garbage collect %pM\n",
@@ -2207,8 +2264,8 @@ static int vxlan_open(struct net_device *dev)
        struct vxlan_sock *vs;
        int ret = 0;
 
-       vs = vxlan_sock_add(vxlan->net, vxlan->dst_port, vxlan_rcv, NULL,
-                           false, vxlan->flags);
+       vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
+                           vxlan->cfg.no_share, vxlan->flags);
        if (IS_ERR(vs))
                return PTR_ERR(vs);
 
@@ -2222,7 +2279,7 @@ static int vxlan_open(struct net_device *dev)
                }
        }
 
-       if (vxlan->age_interval)
+       if (vxlan->cfg.age_interval)
                mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);
 
        return ret;
@@ -2380,7 +2437,7 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       vxlan->dst_port = htons(vxlan_port);
+       vxlan->cfg.dst_port = htons(vxlan_port);
 
        vxlan->dev = dev;
 
@@ -2405,6 +2462,8 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
        [IFLA_VXLAN_RSC]        = { .type = NLA_U8 },
        [IFLA_VXLAN_L2MISS]     = { .type = NLA_U8 },
        [IFLA_VXLAN_L3MISS]     = { .type = NLA_U8 },
+       [IFLA_VXLAN_FLOWBASED]  = { .type = NLA_U8 },
+       [IFLA_VXLAN_COLLECT_METADATA]   = { .type = NLA_U8 },
        [IFLA_VXLAN_PORT]       = { .type = NLA_U16 },
        [IFLA_VXLAN_UDP_CSUM]   = { .type = NLA_U8 },
        [IFLA_VXLAN_UDP_ZERO_CSUM6_TX]  = { .type = NLA_U8 },
@@ -2500,7 +2559,6 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
 
 /* Create new listen socket if needed */
 static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
-                                             vxlan_rcv_t *rcv, void *data,
                                              u32 flags)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
@@ -2529,8 +2587,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 
        vs->sock = sock;
        atomic_set(&vs->refcnt, 1);
-       vs->rcv = rcv;
-       vs->data = data;
        vs->flags = (flags & VXLAN_F_RCV_FLAGS);
 
        /* Initialize the vxlan udp offloads structure */
@@ -2554,9 +2610,8 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
        return vs;
 }
 
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-                                 vxlan_rcv_t *rcv, void *data,
-                                 bool no_share, u32 flags)
+static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+                                        bool no_share, u32 flags)
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
@@ -2566,7 +2621,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                spin_lock(&vn->sock_lock);
                vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
                                     flags);
-               if (vs && vs->rcv == rcv) {
+               if (vs) {
                        if (!atomic_add_unless(&vs->refcnt, 1, 0))
                                vs = ERR_PTR(-EBUSY);
                        spin_unlock(&vn->sock_lock);
@@ -2575,58 +2630,38 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
                spin_unlock(&vn->sock_lock);
        }
 
-       return vxlan_socket_create(net, port, rcv, data, flags);
+       return vxlan_socket_create(net, port, flags);
 }
-EXPORT_SYMBOL_GPL(vxlan_sock_add);
 
-static int vxlan_newlink(struct net *src_net, struct net_device *dev,
-                        struct nlattr *tb[], struct nlattr *data[])
+static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
+                              struct vxlan_config *conf)
 {
        struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
-       __u32 vni;
        int err;
        bool use_ipv6 = false;
-
-       if (!data[IFLA_VXLAN_ID])
-               return -EINVAL;
+       __be16 default_port = vxlan->cfg.dst_port;
 
        vxlan->net = src_net;
 
-       vni = nla_get_u32(data[IFLA_VXLAN_ID]);
-       dst->remote_vni = vni;
-
-       /* Unless IPv6 is explicitly requested, assume IPv4 */
-       dst->remote_ip.sa.sa_family = AF_INET;
-       if (data[IFLA_VXLAN_GROUP]) {
-               dst->remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
-       } else if (data[IFLA_VXLAN_GROUP6]) {
-               if (!IS_ENABLED(CONFIG_IPV6))
-                       return -EPFNOSUPPORT;
+       dst->remote_vni = conf->vni;
 
-               dst->remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
-               dst->remote_ip.sa.sa_family = AF_INET6;
-               use_ipv6 = true;
-       }
+       memcpy(&dst->remote_ip, &conf->remote_ip, sizeof(conf->remote_ip));
 
-       if (data[IFLA_VXLAN_LOCAL]) {
-               vxlan->saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
-               vxlan->saddr.sa.sa_family = AF_INET;
-       } else if (data[IFLA_VXLAN_LOCAL6]) {
-               if (!IS_ENABLED(CONFIG_IPV6))
-                       return -EPFNOSUPPORT;
+       /* Unless IPv6 is explicitly requested, assume IPv4 */
+       if (!dst->remote_ip.sa.sa_family)
+               dst->remote_ip.sa.sa_family = AF_INET;
 
-               /* TODO: respect scope id */
-               vxlan->saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
-               vxlan->saddr.sa.sa_family = AF_INET6;
+       if (dst->remote_ip.sa.sa_family == AF_INET6 ||
+           vxlan->cfg.saddr.sa.sa_family == AF_INET6)
                use_ipv6 = true;
-       }
 
-       if (data[IFLA_VXLAN_LINK] &&
-           (dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
+       if (conf->remote_ifindex) {
                struct net_device *lowerdev
-                        = __dev_get_by_index(src_net, dst->remote_ifindex);
+                        = __dev_get_by_index(src_net, conf->remote_ifindex);
+
+               dst->remote_ifindex = conf->remote_ifindex;
 
                if (!lowerdev) {
                        pr_info("ifindex %d does not exist\n", dst->remote_ifindex);
@@ -2644,7 +2679,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
                }
 #endif
 
-               if (!tb[IFLA_MTU])
+               if (!conf->mtu)
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
                dev->needed_headroom = lowerdev->hard_header_len +
@@ -2652,101 +2687,192 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
        } else if (use_ipv6)
                vxlan->flags |= VXLAN_F_IPV6;
 
+       memcpy(&vxlan->cfg, conf, sizeof(*conf));
+       if (!vxlan->cfg.dst_port)
+               vxlan->cfg.dst_port = default_port;
+       vxlan->flags |= conf->flags;
+
+       if (!vxlan->cfg.age_interval)
+               vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
+
+       if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
+                          vxlan->cfg.dst_port, vxlan->flags))
+               return -EEXIST;
+
+       dev->ethtool_ops = &vxlan_ethtool_ops;
+
+       /* create an fdb entry for a valid default destination */
+       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+               err = vxlan_fdb_create(vxlan, all_zeros_mac,
+                                      &vxlan->default_dst.remote_ip,
+                                      NUD_REACHABLE|NUD_PERMANENT,
+                                      NLM_F_EXCL|NLM_F_CREATE,
+                                      vxlan->cfg.dst_port,
+                                      vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_ifindex,
+                                      NTF_SELF);
+               if (err)
+                       return err;
+       }
+
+       err = register_netdevice(dev);
+       if (err) {
+               vxlan_fdb_delete_default(vxlan);
+               return err;
+       }
+
+       list_add(&vxlan->next, &vn->vxlan_list);
+
+       return 0;
+}
+
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type, struct vxlan_config *conf)
+{
+       struct nlattr *tb[IFLA_MAX+1];
+       struct net_device *dev;
+       int err;
+
+       memset(&tb, 0, sizeof(tb));
+
+       dev = rtnl_create_link(net, name, name_assign_type,
+                              &vxlan_link_ops, tb);
+       if (IS_ERR(dev))
+               return dev;
+
+       err = vxlan_dev_configure(net, dev, conf);
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
+
+       return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
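
vxlan_dev_create() is exported so that other kernel code can instantiate a VXLAN device directly from a struct vxlan_config instead of going through rtnl_newlink. Below is a minimal sketch of a hypothetical in-kernel caller, assuming a metadata-mode device on the IANA VXLAN port 4789 and an arbitrary device name; it must run under rtnl_lock(), since the configure path ends in register_netdevice():

/* Hypothetical caller sketch (not part of this patch). */
static struct net_device *example_vxlan_create(struct net *net)
{
	struct vxlan_config conf;
	struct net_device *dev;
	int err;

	memset(&conf, 0, sizeof(conf));
	conf.dst_port = htons(4789);		/* IANA VXLAN port */
	conf.flags = VXLAN_F_COLLECT_METADATA;	/* per-packet tunnel metadata */

	dev = vxlan_dev_create(net, "vxlan_sys_4789", NET_NAME_USER, &conf);
	if (IS_ERR(dev))
		return dev;

	err = dev_open(dev);
	if (err) {
		unregister_netdevice(dev);
		return ERR_PTR(err);
	}

	return dev;
}
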
+static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+                        struct nlattr *tb[], struct nlattr *data[])
+{
+       struct vxlan_config conf;
+       int err;
+
+       if (!data[IFLA_VXLAN_ID])
+               return -EINVAL;
+
+       memset(&conf, 0, sizeof(conf));
+       conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+
+       if (data[IFLA_VXLAN_GROUP]) {
+               conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
+       } else if (data[IFLA_VXLAN_GROUP6]) {
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EPFNOSUPPORT;
+
+               conf.remote_ip.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_GROUP6]);
+               conf.remote_ip.sa.sa_family = AF_INET6;
+       }
+
+       if (data[IFLA_VXLAN_LOCAL]) {
+               conf.saddr.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_LOCAL]);
+               conf.saddr.sa.sa_family = AF_INET;
+       } else if (data[IFLA_VXLAN_LOCAL6]) {
+               if (!IS_ENABLED(CONFIG_IPV6))
+                       return -EPFNOSUPPORT;
+
+               /* TODO: respect scope id */
+               conf.saddr.sin6.sin6_addr = nla_get_in6_addr(data[IFLA_VXLAN_LOCAL6]);
+               conf.saddr.sa.sa_family = AF_INET6;
+       }
+
+       if (data[IFLA_VXLAN_LINK])
+               conf.remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]);
+
        if (data[IFLA_VXLAN_TOS])
-               vxlan->tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
+               conf.tos  = nla_get_u8(data[IFLA_VXLAN_TOS]);
 
        if (data[IFLA_VXLAN_TTL])
-               vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
+               conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);
 
        if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
-               vxlan->flags |= VXLAN_F_LEARN;
+               conf.flags |= VXLAN_F_LEARN;
 
        if (data[IFLA_VXLAN_AGEING])
-               vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
-       else
-               vxlan->age_interval = FDB_AGE_DEFAULT;
+               conf.age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
 
        if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
-               vxlan->flags |= VXLAN_F_PROXY;
+               conf.flags |= VXLAN_F_PROXY;
 
        if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
-               vxlan->flags |= VXLAN_F_RSC;
+               conf.flags |= VXLAN_F_RSC;
 
        if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
-               vxlan->flags |= VXLAN_F_L2MISS;
+               conf.flags |= VXLAN_F_L2MISS;
 
        if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
-               vxlan->flags |= VXLAN_F_L3MISS;
+               conf.flags |= VXLAN_F_L3MISS;
 
        if (data[IFLA_VXLAN_LIMIT])
-               vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+               conf.addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);
+
+       if (data[IFLA_VXLAN_FLOWBASED] &&
+           nla_get_u8(data[IFLA_VXLAN_FLOWBASED]))
+               conf.flags |= VXLAN_F_FLOW_BASED;
+
+       if (data[IFLA_VXLAN_COLLECT_METADATA] &&
+           nla_get_u8(data[IFLA_VXLAN_COLLECT_METADATA]))
+               conf.flags |= VXLAN_F_COLLECT_METADATA;
 
        if (data[IFLA_VXLAN_PORT_RANGE]) {
                const struct ifla_vxlan_port_range *p
                        = nla_data(data[IFLA_VXLAN_PORT_RANGE]);
-               vxlan->port_min = ntohs(p->low);
-               vxlan->port_max = ntohs(p->high);
+               conf.port_min = ntohs(p->low);
+               conf.port_max = ntohs(p->high);
        }
 
        if (data[IFLA_VXLAN_PORT])
-               vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
+               conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]);
 
        if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM]))
-               vxlan->flags |= VXLAN_F_UDP_CSUM;
+               conf.flags |= VXLAN_F_UDP_CSUM;
 
        if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] &&
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX]))
-               vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
+               conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
 
        if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX] &&
            nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
-               vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
+               conf.flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
 
        if (data[IFLA_VXLAN_REMCSUM_TX] &&
            nla_get_u8(data[IFLA_VXLAN_REMCSUM_TX]))
-               vxlan->flags |= VXLAN_F_REMCSUM_TX;
+               conf.flags |= VXLAN_F_REMCSUM_TX;
 
        if (data[IFLA_VXLAN_REMCSUM_RX] &&
            nla_get_u8(data[IFLA_VXLAN_REMCSUM_RX]))
-               vxlan->flags |= VXLAN_F_REMCSUM_RX;
+               conf.flags |= VXLAN_F_REMCSUM_RX;
 
        if (data[IFLA_VXLAN_GBP])
-               vxlan->flags |= VXLAN_F_GBP;
+               conf.flags |= VXLAN_F_GBP;
 
        if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
-               vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
-
-       if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
-                          vxlan->dst_port, vxlan->flags)) {
-               pr_info("duplicate VNI %u\n", vni);
-               return -EEXIST;
-       }
+               conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
 
-       dev->ethtool_ops = &vxlan_ethtool_ops;
+       err = vxlan_dev_configure(src_net, dev, &conf);
+       switch (err) {
+       case -ENODEV:
+               pr_info("ifindex %d does not exist\n", conf.remote_ifindex);
+               break;
 
-       /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
-               err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
-                                      NUD_REACHABLE|NUD_PERMANENT,
-                                      NLM_F_EXCL|NLM_F_CREATE,
-                                      vxlan->dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
-                                      NTF_SELF);
-               if (err)
-                       return err;
-       }
+       case -EPERM:
+               pr_info("IPv6 is disabled via sysctl\n");
+               break;
 
-       err = register_netdevice(dev);
-       if (err) {
-               vxlan_fdb_delete_default(vxlan);
-               return err;
+       case -EEXIST:
+               pr_info("duplicate VNI %u\n", conf.vni);
+               break;
        }
 
-       list_add(&vxlan->next, &vn->vxlan_list);
-
-       return 0;
+       return err;
 }
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
@@ -2777,6 +2903,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_RSC */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L2MISS */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_L3MISS */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_FLOWBASED */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_AGEING */
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LIMIT */
                nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
@@ -2794,8 +2921,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        const struct vxlan_dev *vxlan = netdev_priv(dev);
        const struct vxlan_rdst *dst = &vxlan->default_dst;
        struct ifla_vxlan_port_range ports = {
-               .low =  htons(vxlan->port_min),
-               .high = htons(vxlan->port_max),
+               .low =  htons(vxlan->cfg.port_min),
+               .high = htons(vxlan->cfg.port_max),
        };
 
        if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
@@ -2818,22 +2945,22 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
                goto nla_put_failure;
 
-       if (!vxlan_addr_any(&vxlan->saddr)) {
-               if (vxlan->saddr.sa.sa_family == AF_INET) {
+       if (!vxlan_addr_any(&vxlan->cfg.saddr)) {
+               if (vxlan->cfg.saddr.sa.sa_family == AF_INET) {
                        if (nla_put_in_addr(skb, IFLA_VXLAN_LOCAL,
-                                           vxlan->saddr.sin.sin_addr.s_addr))
+                                           vxlan->cfg.saddr.sin.sin_addr.s_addr))
                                goto nla_put_failure;
 #if IS_ENABLED(CONFIG_IPV6)
                } else {
                        if (nla_put_in6_addr(skb, IFLA_VXLAN_LOCAL6,
-                                            &vxlan->saddr.sin6.sin6_addr))
+                                            &vxlan->cfg.saddr.sin6.sin6_addr))
                                goto nla_put_failure;
 #endif
                }
        }
 
-       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
-           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
+       if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
                        !!(vxlan->flags & VXLAN_F_LEARN)) ||
            nla_put_u8(skb, IFLA_VXLAN_PROXY,
@@ -2843,9 +2970,11 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
                        !!(vxlan->flags & VXLAN_F_L2MISS)) ||
            nla_put_u8(skb, IFLA_VXLAN_L3MISS,
                        !!(vxlan->flags & VXLAN_F_L3MISS)) ||
-           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
-           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax) ||
-           nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->dst_port) ||
+           nla_put_u8(skb, IFLA_VXLAN_FLOWBASED,
+                      !!(vxlan->flags & VXLAN_F_FLOW_BASED)) ||
+           nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->cfg.age_interval) ||
+           nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) ||
+           nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM,
                        !!(vxlan->flags & VXLAN_F_UDP_CSUM)) ||
            nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX,
index 9729e69416358a120ef0c97cd3feb034cb0ca6bb..c04fb00e7930219f909939b9ea36140ad3101fb8 100644 (file)
@@ -11,7 +11,8 @@ ath10k_core-y += mac.o \
                 wmi-tlv.o \
                 bmi.o \
                 hw.o \
-                p2p.o
+                p2p.o \
+                swap.o
 
 ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
 ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
index 31a990635490aa07cf74a515e620c90fc1b697c8..df7c7616533b08636374736911ca3f624983a47e 100644 (file)
@@ -178,7 +178,7 @@ struct bmi_target_info {
 };
 
 /* in msec */
-#define BMI_COMMUNICATION_TIMEOUT_HZ (1*HZ)
+#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
 
 #define BMI_CE_NUM_TO_TARG 0
 #define BMI_CE_NUM_TO_HOST 1
index e508c65b6ba8a6d62ffbcca77e5d8f41ff036ad3..cf28fbebaedcfc9b372d509c6a88fcbc2d808773 100644 (file)
@@ -452,6 +452,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
 {
        struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
        unsigned int nentries_mask = dest_ring->nentries_mask;
+       struct ath10k *ar = ce_state->ar;
        unsigned int sw_index = dest_ring->sw_index;
 
        struct ce_desc *base = dest_ring->base_addr_owner_space;
index 0eddb204d85bb9b08dcb84f55530b25fb3b901e8..5c903e15dd65e6ad7da14caffb1e3a4fd8fe4bfc 100644 (file)
@@ -21,7 +21,7 @@
 #include "hif.h"
 
 /* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 8
+#define CE_COUNT_MAX 12
 #define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
@@ -38,8 +38,13 @@ struct ath10k_ce_pipe;
 
 #define CE_DESC_FLAGS_GATHER         (1 << 0)
 #define CE_DESC_FLAGS_BYTE_SWAP      (1 << 1)
-#define CE_DESC_FLAGS_META_DATA_MASK 0xFFFC
-#define CE_DESC_FLAGS_META_DATA_LSB  2
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS     (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS      (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB  ar->hw_values->ce_desc_meta_data_lsb
 
 struct ce_desc {
        __le32 addr;
@@ -423,8 +428,10 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
 
 #define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
 
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB              8
-#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK             0x0000ff00
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+                               ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+                               ar->regs->ce_wrap_intr_sum_host_msi_mask
 #define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
        (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
                CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
index 59496a90ad5e241563f5bcaa8226ebfde87bd478..f79fa6c67ebc3cbcff75a802a426d8ccdb89d979 100644 (file)
@@ -49,6 +49,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
                .uart_pin = 7,
                .has_shifted_cc_wraparound = true,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA988X_HW_2_0_FW_DIR,
                        .fw = QCA988X_HW_2_0_FW_FILE,
@@ -63,6 +64,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw2.1",
                .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA6174_HW_2_1_FW_DIR,
                        .fw = QCA6174_HW_2_1_FW_FILE,
@@ -77,6 +79,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw3.0",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        .dir = QCA6174_HW_3_0_FW_DIR,
                        .fw = QCA6174_HW_3_0_FW_FILE,
@@ -91,6 +94,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                .name = "qca6174 hw3.2",
                .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
                .uart_pin = 6,
+               .otp_exe_param = 0,
                .fw = {
                        /* uses same binaries as hw3.0 */
                        .dir = QCA6174_HW_3_0_FW_DIR,
@@ -101,8 +105,68 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
                        .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
                },
        },
+       {
+               .id = QCA99X0_HW_2_0_DEV_VERSION,
+               .name = "qca99x0 hw2.0",
+               .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+               .uart_pin = 7,
+               .otp_exe_param = 0x00000700,
+               .continuous_frag_desc = true,
+               .fw = {
+                       .dir = QCA99X0_HW_2_0_FW_DIR,
+                       .fw = QCA99X0_HW_2_0_FW_FILE,
+                       .otp = QCA99X0_HW_2_0_OTP_FILE,
+                       .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+                       .board_size = QCA99X0_BOARD_DATA_SZ,
+                       .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+               },
+       },
 };
 
+static const char *const ath10k_core_fw_feature_str[] = {
+       [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+       [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+       [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+       [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+       [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+       [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+       [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+       [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+       [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+       [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+                                                  size_t buf_len,
+                                                  enum ath10k_fw_features feat)
+{
+       if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+           WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+               return scnprintf(buf, buf_len, "bit%d", feat);
+       }
+
+       return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+                                    char *buf,
+                                    size_t buf_len)
+{
+       unsigned int len = 0;
+       int i;
+
+       for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+               if (test_bit(i, ar->fw_features)) {
+                       if (len > 0)
+                               len += scnprintf(buf + len, buf_len - len, ",");
+
+                       len += ath10k_core_get_fw_feature_str(buf + len,
+                                                             buf_len - len,
+                                                             i);
+               }
+       }
+}
+
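
ath10k_core_get_fw_features_str() renders the fw_features bitmap as a comma-separated list, falling back to "bitN" for bits without a name. A hedged sketch of a possible caller (not in this patch), assuming a 128-byte buffer is large enough for the current feature set:

/* Hypothetical debug helper (not part of this patch): log the firmware
 * feature flags once the firmware metadata has been parsed.
 */
static void example_log_fw_features(struct ath10k *ar)
{
	char features[128];

	ath10k_core_get_fw_features_str(ar, features, sizeof(features));
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware features %s\n", features);
}
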
 static void ath10k_send_suspend_complete(struct ath10k *ar)
 {
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
@@ -355,6 +419,7 @@ out:
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
        u32 result, address = ar->hw_params.patch_load_addr;
+       u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
        int ret;
 
        ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
@@ -380,7 +445,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
                return ret;
        }
 
-       ret = ath10k_bmi_execute(ar, address, 0, &result);
+       ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
        if (ret) {
                ath10k_err(ar, "could not execute otp (%d)\n", ret);
                return ret;
@@ -412,6 +477,13 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
                data = ar->firmware_data;
                data_len = ar->firmware_len;
                mode_name = "normal";
+               ret = ath10k_swap_code_seg_configure(ar,
+                               ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
+               if (ret) {
+                       ath10k_err(ar, "failed to configure fw code swap: %d\n",
+                                  ret);
+                       return ret;
+               }
                break;
        case ATH10K_FIRMWARE_MODE_UTF:
                data = ar->testmode.utf->data;
@@ -451,6 +523,8 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        if (!IS_ERR(ar->cal_file))
                release_firmware(ar->cal_file);
 
+       ath10k_swap_code_seg_release(ar);
+
        ar->board = NULL;
        ar->board_data = NULL;
        ar->board_len = 0;
@@ -464,6 +538,7 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
        ar->firmware_len = 0;
 
        ar->cal_file = NULL;
+
 }
 
 static int ath10k_fetch_cal_file(struct ath10k *ar)
@@ -737,6 +812,13 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
                        ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
                                   ar->htt.op_version);
                        break;
+               case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "found fw code swap image ie (%zd B)\n",
+                                  ie_len);
+                       ar->swap.firmware_codeswap_data = data;
+                       ar->swap.firmware_codeswap_len = ie_len;
+                       break;
                default:
                        ath10k_warn(ar, "Unknown FW IE: %u\n",
                                    le32_to_cpu(hdr->id));
@@ -1014,6 +1096,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
                ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
                        WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_10_1:
        case ATH10K_FW_WMI_OP_VERSION_10_2:
@@ -1023,6 +1106,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
                ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
                ar->fw_stats_req_mask = WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_TLV:
                ar->max_num_peers = TARGET_TLV_NUM_PEERS;
@@ -1033,6 +1117,17 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
                ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
                        WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+               break;
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+               ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+               ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+               ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+               ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+               ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+               ar->fw_stats_req_mask = WMI_STAT_PEER;
+               ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM;
                break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1056,6 +1151,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
                case ATH10K_FW_WMI_OP_VERSION_TLV:
                        ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
                        break;
+               case ATH10K_FW_WMI_OP_VERSION_10_4:
                case ATH10K_FW_WMI_OP_VERSION_UNSET:
                case ATH10K_FW_WMI_OP_VERSION_MAX:
                        WARN_ON(1);
@@ -1330,6 +1426,13 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_free_firmware_files;
        }
 
+       ret = ath10k_swap_code_seg_init(ar);
+       if (ret) {
+               ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+                          ret);
+               goto err_free_firmware_files;
+       }
+
        mutex_lock(&ar->conf_mutex);
 
        ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
@@ -1470,9 +1573,15 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
        switch (hw_rev) {
        case ATH10K_HW_QCA988X:
                ar->regs = &qca988x_regs;
+               ar->hw_values = &qca988x_values;
                break;
        case ATH10K_HW_QCA6174:
                ar->regs = &qca6174_regs;
+               ar->hw_values = &qca6174_values;
+               break;
+       case ATH10K_HW_QCA99X0:
+               ar->regs = &qca99x0_regs;
+               ar->hw_values = &qca99x0_values;
                break;
        default:
                ath10k_err(ar, "unsupported core hardware revision %d\n",
index 78094f23c9dd5264a66d32167680a8af8728a8ff..78e07051b8970415086783674ba2d123cfddc312 100644 (file)
@@ -36,6 +36,7 @@
 #include "spectral.h"
 #include "thermal.h"
 #include "wow.h"
+#include "swap.h"
 
 #define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -327,8 +328,8 @@ struct ath10k_vif {
                        u32 uapsd;
                } sta;
                struct {
-                       /* 127 stations; wmi limit */
-                       u8 tim_bitmap[16];
+                       /* 512 stations */
+                       u8 tim_bitmap[64];
                        u8 tim_len;
                        u32 ssid_len;
                        u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -545,6 +546,7 @@ struct ath10k {
        u32 ht_cap_info;
        u32 vht_cap_info;
        u32 num_rf_chains;
+       u32 max_spatial_stream;
        /* protected by conf_mutex */
        bool ani_enabled;
 
@@ -560,6 +562,7 @@ struct ath10k {
        struct completion target_suspend;
 
        const struct ath10k_hw_regs *regs;
+       const struct ath10k_hw_values *hw_values;
        struct ath10k_bmi bmi;
        struct ath10k_wmi wmi;
        struct ath10k_htc htc;
@@ -570,6 +573,7 @@ struct ath10k {
                const char *name;
                u32 patch_load_addr;
                int uart_pin;
+               u32 otp_exe_param;
 
                /* This is true if given HW chip has a quirky Cycle Counter
                 * wraparound which resets to 0x7fffffff instead of 0. All
@@ -578,6 +582,12 @@ struct ath10k {
                 */
                bool has_shifted_cc_wraparound;
 
+               /* Some chips expect the fragment descriptor to be in
+                * contiguous memory for any TX operation. Set the
+                * continuous_frag_desc flag for hardware with this
+                * requirement.
+                */
+               bool continuous_frag_desc;
+
                struct ath10k_hw_params_fw {
                        const char *dir;
                        const char *fw;
@@ -602,6 +612,12 @@ struct ath10k {
 
        const struct firmware *cal_file;
 
+       struct {
+               const void *firmware_codeswap_data;
+               size_t firmware_codeswap_len;
+               struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+       } swap;
+
        char spec_board_id[100];
        bool spec_board_loaded;
 
@@ -617,6 +633,7 @@ struct ath10k {
                bool is_roc;
                int vdev_id;
                int roc_freq;
+               bool roc_notify;
        } scan;
 
        struct {
@@ -675,6 +692,8 @@ struct ath10k {
        int max_num_stations;
        int max_num_vdevs;
        int max_num_tdls_vdevs;
+       int num_active_peers;
+       int num_tids;
 
        struct work_struct offchan_tx_work;
        struct sk_buff_head offchan_tx_queue;
@@ -749,6 +768,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
                                  enum ath10k_hw_rev hw_rev,
                                  const struct ath10k_hif_ops *hif_ops);
 void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+                                    char *buf,
+                                    size_t max_len);
 
 int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
 int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
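
A quick check on the tim_bitmap resize above: the TIM element carries one bit per association ID, so the byte count is the station limit divided by eight. A tiny illustrative helper (the macro name is invented, not from the driver):

    #include <linux/kernel.h>

    /* one TIM bit per station; name is illustrative, not from the driver */
    #define ATH10K_TIM_BITMAP_BYTES(max_sta)  DIV_ROUND_UP(max_sta, 8)

    /* ATH10K_TIM_BITMAP_BYTES(512) == 64, matching tim_bitmap[64] above;
     * the old tim_bitmap[16] covered the 10.x-era 127-station limit. */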
index 8fa606a9c4ddaf3f95b43d80723a0fbcfe59a010..edf6047997a7bef6a131692e6f842517c91fe891 100644 (file)
@@ -124,7 +124,11 @@ EXPORT_SYMBOL(ath10k_info);
 
 void ath10k_print_driver_info(struct ath10k *ar)
 {
-       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt %d.%d wmi %d cal %s max_sta %d\n",
+       char fw_features[128];
+
+       ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+       ath10k_info(ar, "%s (0x%08x, 0x%08x%s%s%s) fw %s api %d htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d features %s\n",
                    ar->hw_params.name,
                    ar->target_version,
                    ar->chip_id,
@@ -137,8 +141,10 @@ void ath10k_print_driver_info(struct ath10k *ar)
                    ar->htt.target_version_major,
                    ar->htt.target_version_minor,
                    ar->wmi.op_version,
+                   ar->htt.op_version,
                    ath10k_cal_mode_str(ar->cal_mode),
-                   ar->max_num_stations);
+                   ar->max_num_stations,
+                   fw_features);
        ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
                    config_enabled(CONFIG_ATH10K_DEBUG),
                    config_enabled(CONFIG_ATH10K_DEBUGFS),
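
ath10k_core_get_fw_features_str() is only declared in this diff; one plausible shape, assuming a per-bit name helper (both the helper name and the exact formatting are guesses, not confirmed here):

    void ath10k_core_get_fw_features_str(struct ath10k *ar,
                                         char *buf, size_t buf_len)
    {
            size_t len = 0;
            int i;

            buf[0] = '\0';
            for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
                    if (!test_bit(i, ar->fw_features))
                            continue;
                    /* comma-separate the enabled feature names */
                    len += scnprintf(buf + len, buf_len - len, "%s%s",
                                     len ? "," : "",
                                     ath10k_core_get_fw_feature_str(i));
            }
    }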
index 6da6ef26143af0caeac2ed8ed76b14f28319b471..4474c3e839db459cd9e18eb33b821a7045da3190 100644 (file)
@@ -102,6 +102,43 @@ static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
        [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
 };
 
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+       [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+       [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+       [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+       [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+       [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+       [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+       [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+       [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+       [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+                               HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+       [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+                               HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+       [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+       [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+                               HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+       [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+                               HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+       [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+       [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+                               HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+       [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] =
+                               HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+       [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+                               HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+       [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] =
+                               HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
+};
+
 int ath10k_htt_connect(struct ath10k_htt *htt)
 {
        struct ath10k_htc_svc_conn_req conn_req;
@@ -147,6 +184,10 @@ int ath10k_htt_init(struct ath10k *ar)
                2; /* ip4 dscp or ip6 priority */
 
        switch (ar->htt.op_version) {
+       case ATH10K_FW_HTT_OP_VERSION_10_4:
+               ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+               ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+               break;
        case ATH10K_FW_HTT_OP_VERSION_10_1:
                ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
                ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
@@ -208,5 +249,9 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
        if (status)
                return status;
 
+       status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+       if (status)
+               return status;
+
        return ath10k_htt_send_rx_ring_cfg_ll(htt);
 }
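
The per-version tables above let the RX path translate a raw wire message type into the unified enum htt_t2h_msg_type before dispatch. A hedged sketch of the expected lookup in the T2H handler:

    /* illustrative translation step, mirroring the handler's intent */
    enum htt_t2h_msg_type type;

    if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
            ath10k_dbg(ar, ATH10K_DBG_HTT,
                       "htt rx, unsupported msg type %d (max %d)\n",
                       resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
            return;
    }
    type = ar->htt.t2h_msg_types[resp->hdr.msg_type];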
index 7e8a0d83566379b01fec63235d968a9913639b52..8bdf1e7dd1718ab72199efe70bf1dc476adb0fa2 100644 (file)
@@ -87,6 +87,11 @@ struct htt_data_tx_desc_frag {
        __le32 len;
 } __packed;
 
+struct htt_msdu_ext_desc {
+       __le32 tso_flag[4];
+       struct htt_data_tx_desc_frag frags[6];
+};
+
 enum htt_data_tx_desc_flags0 {
        HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
        HTT_DATA_TX_DESC_FLAGS0_NO_AGGR         = 1 << 1,
@@ -349,6 +354,38 @@ enum htt_tlv_t2h_msg_type {
        HTT_TLV_T2H_NUM_MSGS
 };
 
+enum htt_10_4_t2h_msg_type {
+       HTT_10_4_T2H_MSG_TYPE_VERSION_CONF           = 0x0,
+       HTT_10_4_T2H_MSG_TYPE_RX_IND                 = 0x1,
+       HTT_10_4_T2H_MSG_TYPE_RX_FLUSH               = 0x2,
+       HTT_10_4_T2H_MSG_TYPE_PEER_MAP               = 0x3,
+       HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP             = 0x4,
+       HTT_10_4_T2H_MSG_TYPE_RX_ADDBA               = 0x5,
+       HTT_10_4_T2H_MSG_TYPE_RX_DELBA               = 0x6,
+       HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND           = 0x7,
+       HTT_10_4_T2H_MSG_TYPE_PKTLOG                 = 0x8,
+       HTT_10_4_T2H_MSG_TYPE_STATS_CONF             = 0x9,
+       HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND            = 0xa,
+       HTT_10_4_T2H_MSG_TYPE_SEC_IND                = 0xb,
+       HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND          = 0xc,
+       HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND         = 0xd,
+       HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND      = 0xe,
+       HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE            = 0xf,
+       HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND   = 0x10,
+       HTT_10_4_T2H_MSG_TYPE_RX_PN_IND              = 0x11,
+       HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+       HTT_10_4_T2H_MSG_TYPE_TEST                   = 0x13,
+       HTT_10_4_T2H_MSG_TYPE_EN_STATS               = 0x14,
+       HTT_10_4_T2H_MSG_TYPE_AGGR_CONF              = 0x15,
+       HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND           = 0x16,
+       HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF          = 0x17,
+       HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD         = 0x18,
+       /* 0x19 to 0x2f are reserved */
+       HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND     = 0x30,
+       /* keep this last */
+       HTT_10_4_T2H_NUM_MSGS
+};
+
 enum htt_t2h_msg_type {
        HTT_T2H_MSG_TYPE_VERSION_CONF,
        HTT_T2H_MSG_TYPE_RX_IND,
@@ -375,6 +412,10 @@ enum htt_t2h_msg_type {
        HTT_T2H_MSG_TYPE_AGGR_CONF,
        HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
        HTT_T2H_MSG_TYPE_TEST,
+       HTT_T2H_MSG_TYPE_EN_STATS,
+       HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+       HTT_T2H_MSG_TYPE_TX_FETCH_CONF,
+       HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND,
        /* keep this last */
        HTT_T2H_NUM_MSGS
 };
@@ -1430,6 +1471,11 @@ struct ath10k_htt {
 
        /* rx_status template */
        struct ieee80211_rx_status rx_status;
+
+       struct {
+               dma_addr_t paddr;
+               struct htt_msdu_ext_desc *vaddr;
+       } frag_desc;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64
@@ -1497,6 +1543,7 @@ void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
 int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
 int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
 int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
                                u8 max_subfrms_ampdu,
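
For scale: each htt_msdu_ext_desc above is 4 x 4 bytes of TSO flags plus 6 x 8-byte fragment entries, 64 bytes in all assuming no implicit padding, so the coherent frag_desc region grows linearly with the pending-tx depth:

    /* illustrative sizing arithmetic, not driver code */
    size_t ext_sz  = 4 * sizeof(__le32) +
                     6 * sizeof(struct htt_data_tx_desc_frag); /* 64 bytes */
    size_t pool_sz = htt->max_num_pending_tx * ext_sz;
    /* 10.4: TARGET_10_4_NUM_MSDU_DESC (1424) * 64 B = 89 KiB coherent DMA */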
index 89eb16b30fc42479a3b1c11a7b9b3fd88c043490..d7d118328f31b2bc96d661cae2c0d8e2591d0ae5 100644 (file)
@@ -1201,7 +1201,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
 {
        struct htt_rx_desc *rxd;
        enum rx_msdu_decap_format decap;
-       struct ieee80211_hdr *hdr;
 
        /* First msdu's decapped header:
         * [802.11 header] <-- padded to 4 bytes long
@@ -1215,7 +1214,6 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
         */
 
        rxd = (void *)msdu->data - sizeof(*rxd);
-       hdr = (void *)rxd->rx_hdr_status;
        decap = MS(__le32_to_cpu(rxd->msdu_start.info1),
                   RX_MSDU_START_INFO1_DECAP_FORMAT);
 
@@ -2074,6 +2072,10 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                break;
        case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
                break;
+       case HTT_T2H_MSG_TYPE_EN_STATS:
+       case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+       case HTT_T2H_MSG_TYPE_TX_FETCH_CONF:
+       case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND:
        default:
                ath10k_warn(ar, "htt event (%d) not handled\n",
                            resp->hdr.msg_type);
index a60ef7d1d5fcb98278b3838858432f4cb3c8930a..148d5b607c3cf08decf9aba36b82d16e87cf19b9 100644 (file)
@@ -84,6 +84,7 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
 int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
+       int ret, size;
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
                   htt->max_num_pending_tx);
@@ -94,11 +95,31 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
        htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
                                       sizeof(struct ath10k_htt_txbuf), 4, 0);
        if (!htt->tx_pool) {
-               idr_destroy(&htt->pending_tx);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto free_idr_pending_tx;
+       }
+
+       if (!ar->hw_params.continuous_frag_desc)
+               goto skip_frag_desc_alloc;
+
+       size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
+       htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
+                                                 &htt->frag_desc.paddr,
+                                                 GFP_DMA);
+       if (!htt->frag_desc.vaddr) {
+               ath10k_warn(ar, "failed to alloc fragment desc memory\n");
+               ret = -ENOMEM;
+               goto free_tx_pool;
        }
 
+skip_frag_desc_alloc:
        return 0;
+
+free_tx_pool:
+       dma_pool_destroy(htt->tx_pool);
+free_idr_pending_tx:
+       idr_destroy(&htt->pending_tx);
+       return ret;
 }
 
 static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
@@ -121,9 +142,18 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
 
 void ath10k_htt_tx_free(struct ath10k_htt *htt)
 {
+       int size;
+
        idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
        idr_destroy(&htt->pending_tx);
        dma_pool_destroy(htt->tx_pool);
+
+       if (htt->frag_desc.vaddr) {
+               size = htt->max_num_pending_tx *
+                                 sizeof(struct htt_msdu_ext_desc);
+               dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
+                                 htt->frag_desc.paddr);
+       }
 }
 
 void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -201,6 +231,48 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
        return 0;
 }
 
+int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+       struct ath10k *ar = htt->ar;
+       struct sk_buff *skb;
+       struct htt_cmd *cmd;
+       int ret, size;
+
+       if (!ar->hw_params.continuous_frag_desc)
+               return 0;
+
+       if (!htt->frag_desc.paddr) {
+               ath10k_warn(ar, "invalid frag desc memory\n");
+               return -EINVAL;
+       }
+
+       size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
+       skb = ath10k_htc_alloc_skb(ar, size);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put(skb, size);
+       cmd = (struct htt_cmd *)skb->data;
+       cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+       cmd->frag_desc_bank_cfg.info = 0;
+       cmd->frag_desc_bank_cfg.num_banks = 1;
+       cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
+       cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
+                               __cpu_to_le32(htt->frag_desc.paddr);
+       cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
+                               __cpu_to_le16(htt->max_num_pending_tx - 1);
+
+       ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+       if (ret) {
+               ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+                           ret);
+               dev_kfree_skb_any(skb);
+               return ret;
+       }
+
+       return 0;
+}
+
 int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
 {
        struct ath10k *ar = htt->ar;
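
The field accesses in ath10k_htt_send_frag_desc_bank_cfg() imply a wire layout roughly like the sketch below; the array bound and the bank_id member names are assumptions, not confirmed by this diff:

    struct htt_frag_desc_bank_id {
            __le16 bank_min_id;
            __le16 bank_max_id;
    } __packed;

    struct htt_frag_desc_bank_cfg {
            u8 info;
            u8 num_banks;
            u8 desc_size;
            __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; /* assumed bound */
            struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
    } __packed;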
index 5997f00afe3b43b677f1718a64a213090c969d92..fef7ccf6e185eacee92b8eee1a22071edff550a5 100644 (file)
@@ -34,8 +34,15 @@ const struct ath10k_hw_regs qca988x_regs = {
        .ce7_base_address               = 0x00059000,
        .soc_reset_control_si0_rst_mask = 0x00000001,
        .soc_reset_control_ce_rst_mask  = 0x00040000,
-       .soc_chip_id_address            = 0x00ec,
-       .scratch_3_address              = 0x0030,
+       .soc_chip_id_address            = 0x000000ec,
+       .scratch_3_address              = 0x00000030,
+       .fw_indicator_address           = 0x00009030,
+       .pcie_local_base_address        = 0x00080000,
+       .ce_wrap_intr_sum_host_msi_lsb  = 0x00000008,
+       .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+       .pcie_intr_fw_mask              = 0x00000400,
+       .pcie_intr_ce_mask_all          = 0x0007f800,
+       .pcie_intr_clr_address          = 0x00000014,
 };
 
 const struct ath10k_hw_regs qca6174_regs = {
@@ -54,8 +61,79 @@ const struct ath10k_hw_regs qca6174_regs = {
        .ce7_base_address                       = 0x00036000,
        .soc_reset_control_si0_rst_mask         = 0x00000000,
        .soc_reset_control_ce_rst_mask          = 0x00000001,
-       .soc_chip_id_address                    = 0x000f0,
-       .scratch_3_address                      = 0x0028,
+       .soc_chip_id_address                    = 0x000000f0,
+       .scratch_3_address                      = 0x00000028,
+       .fw_indicator_address                   = 0x0003a028,
+       .pcie_local_base_address                = 0x00080000,
+       .ce_wrap_intr_sum_host_msi_lsb          = 0x00000008,
+       .ce_wrap_intr_sum_host_msi_mask         = 0x0000ff00,
+       .pcie_intr_fw_mask                      = 0x00000400,
+       .pcie_intr_ce_mask_all                  = 0x0007f800,
+       .pcie_intr_clr_address                  = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+       .rtc_state_cold_reset_mask              = 0x00000400,
+       .rtc_soc_base_address                   = 0x00080000,
+       .rtc_wmac_base_address                  = 0x00000000,
+       .soc_core_base_address                  = 0x00082000,
+       .ce_wrapper_base_address                = 0x0004d000,
+       .ce0_base_address                       = 0x0004a000,
+       .ce1_base_address                       = 0x0004a400,
+       .ce2_base_address                       = 0x0004a800,
+       .ce3_base_address                       = 0x0004ac00,
+       .ce4_base_address                       = 0x0004b000,
+       .ce5_base_address                       = 0x0004b400,
+       .ce6_base_address                       = 0x0004b800,
+       .ce7_base_address                       = 0x0004bc00,
+       /* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+        * addresses of CE0 and CE1, no other copy engine is directly
+        * referenced in the code, so it is not really necessary to assign
+        * addresses for the newly supported CEs in this address table.
+        *      Copy Engine             Address
+        *      CE8                     0x0004c000
+        *      CE9                     0x0004c400
+        *      CE10                    0x0004c800
+        *      CE11                    0x0004cc00
+        */
+       .soc_reset_control_si0_rst_mask         = 0x00000001,
+       .soc_reset_control_ce_rst_mask          = 0x00000100,
+       .soc_chip_id_address                    = 0x000000ec,
+       .scratch_3_address                      = 0x00040050,
+       .fw_indicator_address                   = 0x00040050,
+       .pcie_local_base_address                = 0x00000000,
+       .ce_wrap_intr_sum_host_msi_lsb          = 0x0000000c,
+       .ce_wrap_intr_sum_host_msi_mask         = 0x00fff000,
+       .pcie_intr_fw_mask                      = 0x00100000,
+       .pcie_intr_ce_mask_all                  = 0x000fff00,
+       .pcie_intr_clr_address                  = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+       .rtc_state_val_on               = 3,
+       .ce_count                       = 8,
+       .msi_assign_ce_max              = 7,
+       .num_target_ce_config_wlan      = 7,
+       .ce_desc_meta_data_mask         = 0xFFFC,
+       .ce_desc_meta_data_lsb          = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+       .rtc_state_val_on               = 3,
+       .ce_count                       = 8,
+       .msi_assign_ce_max              = 7,
+       .num_target_ce_config_wlan      = 7,
+       .ce_desc_meta_data_mask         = 0xFFFC,
+       .ce_desc_meta_data_lsb          = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+       .rtc_state_val_on               = 5,
+       .ce_count                       = 12,
+       .msi_assign_ce_max              = 12,
+       .num_target_ce_config_wlan      = 10,
+       .ce_desc_meta_data_mask         = 0xFFF0,
+       .ce_desc_meta_data_lsb          = 4,
 };
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
index 85cca29375fee8f08ab09975c31cc3130194176e..9172285175461ab359f9a7b552a7276b664000a9 100644 (file)
@@ -72,6 +72,18 @@ enum qca6174_chip_id_rev {
 #define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
 #define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
 
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV     0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION     0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV     0x1
+#define QCA99X0_HW_2_0_FW_DIR          ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_FW_FILE         "firmware.bin"
+#define QCA99X0_HW_2_0_OTP_FILE        "otp.bin"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
 #define ATH10K_FW_API2_FILE            "firmware-2.bin"
 #define ATH10K_FW_API3_FILE            "firmware-3.bin"
 
@@ -112,6 +124,9 @@ enum ath10k_fw_ie_type {
         * FW API 5 and above.
         */
        ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+       /* Code swap image for firmware binary */
+       ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
 };
 
 enum ath10k_fw_wmi_op_version {
@@ -122,6 +137,7 @@ enum ath10k_fw_wmi_op_version {
        ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
        ATH10K_FW_WMI_OP_VERSION_TLV = 4,
        ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+       ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
 
        /* keep last */
        ATH10K_FW_WMI_OP_VERSION_MAX,
@@ -137,6 +153,8 @@ enum ath10k_fw_htt_op_version {
 
        ATH10K_FW_HTT_OP_VERSION_TLV = 3,
 
+       ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
        /* keep last */
        ATH10K_FW_HTT_OP_VERSION_MAX,
 };
@@ -144,6 +162,7 @@ enum ath10k_fw_htt_op_version {
 enum ath10k_hw_rev {
        ATH10K_HW_QCA988X,
        ATH10K_HW_QCA6174,
+       ATH10K_HW_QCA99X0,
 };
 
 struct ath10k_hw_regs {
@@ -164,16 +183,38 @@ struct ath10k_hw_regs {
        u32 soc_reset_control_ce_rst_mask;
        u32 soc_chip_id_address;
        u32 scratch_3_address;
+       u32 fw_indicator_address;
+       u32 pcie_local_base_address;
+       u32 ce_wrap_intr_sum_host_msi_lsb;
+       u32 ce_wrap_intr_sum_host_msi_mask;
+       u32 pcie_intr_fw_mask;
+       u32 pcie_intr_ce_mask_all;
+       u32 pcie_intr_clr_address;
 };
 
 extern const struct ath10k_hw_regs qca988x_regs;
 extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+
+struct ath10k_hw_values {
+       u32 rtc_state_val_on;
+       u8 ce_count;
+       u8 msi_assign_ce_max;
+       u8 num_target_ce_config_wlan;
+       u16 ce_desc_meta_data_mask;
+       u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
 
 void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
                                u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
 
 #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
 #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
 
 /* Known pecularities:
  *  - current FW doesn't support raw rx mode (last tested v599)
@@ -310,8 +351,73 @@ enum ath10k_hw_rate_cck {
 #define TARGET_TLV_NUM_MSDU_DESC               (1024 + 32)
 #define TARGET_TLV_NUM_WOW_PATTERNS            22
 
+/* Diagnostic Window */
+#define CE_DIAG_PIPE   7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS                  16
+#define TARGET_10_4_NUM_STATIONS               32
+#define TARGET_10_4_NUM_PEERS                  ((TARGET_10_4_NUM_STATIONS) + \
+                                                (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS               0
+
+/* TODO: increase qcache max client limit to 512 after
+ * testing with 512 clients.
+ */
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX       256
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS                50
+#define TARGET_10_4_NUM_OFFLOAD_PEERS          0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS  0
+#define TARGET_10_4_NUM_PEER_KEYS              2
+#define TARGET_10_4_TGT_NUM_TIDS               ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_AST_SKID_LIMIT             32
+#define TARGET_10_4_TX_CHAIN_MASK              (BIT(0) | BIT(1) | \
+                                                BIT(2) | BIT(3))
+#define TARGET_10_4_RX_CHAIN_MASK              (BIT(0) | BIT(1) | \
+                                                BIT(2) | BIT(3))
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI          100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI          40
+
+#define TARGET_10_4_RX_DECAP_MODE              ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS              4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV     3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV      3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES   8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS           0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS      0
+#define TARGET_10_4_MCAST2UCAST_MODE           0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE            1024
+#define TARGET_10_4_NUM_WDS_ENTRIES            32
+#define TARGET_10_4_DMA_BURST_SIZE             1
+#define TARGET_10_4_MAC_AGGR_DELIM             0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG                 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV       3
+#define TARGET_10_4_NUM_MSDU_DESC              (1024 + 400)
+#define TARGET_10_4_11AC_TX_MAX_FRAGS          2
+#define TARGET_10_4_MAX_PEER_EXT_STATS         16
+#define TARGET_10_4_SMART_ANT_CAP              0
+#define TARGET_10_4_BK_MIN_FREE                        0
+#define TARGET_10_4_BE_MIN_FREE                        0
+#define TARGET_10_4_VI_MIN_FREE                        0
+#define TARGET_10_4_VO_MIN_FREE                        0
+#define TARGET_10_4_RX_BATCH_MODE              1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG  0
+#define TARGET_10_4_ATF_CONFIG                 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG           1
+#define TARGET_10_4_QWRAP_CONFIG               0
+
 /* Number of Copy Engines supported */
-#define CE_COUNT 8
+#define CE_COUNT ar->hw_values->ce_count
 
 /*
  * Total number of PCIe MSI interrupts requested for all interrupt sources.
@@ -335,10 +441,10 @@ enum ath10k_hw_rate_cck {
 
 /* MSIs for Copy Engines */
 #define MSI_ASSIGN_CE_INITIAL  1
-#define MSI_ASSIGN_CE_MAX      7
+#define MSI_ASSIGN_CE_MAX      ar->hw_values->msi_assign_ce_max
 
 /* as of IP3.7.1 */
-#define RTC_STATE_V_ON                         3
+#define RTC_STATE_V_ON                         ar->hw_values->rtc_state_val_on
 
 #define RTC_STATE_COLD_RESET_MASK              ar->regs->rtc_state_cold_reset_mask
 #define RTC_STATE_V_LSB                                0
@@ -374,7 +480,7 @@ enum ath10k_hw_rate_cck {
 #define CE7_BASE_ADDRESS                       ar->regs->ce7_base_address
 #define DBI_BASE_ADDRESS                       0x00060000
 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS     0x0006c000
-#define PCIE_LOCAL_BASE_ADDRESS                        0x00080000
+#define PCIE_LOCAL_BASE_ADDRESS                ar->regs->pcie_local_base_address
 
 #define SOC_RESET_CONTROL_ADDRESS              0x00000000
 #define SOC_RESET_CONTROL_OFFSET               0x00000000
@@ -448,7 +554,7 @@ enum ath10k_hw_rate_cck {
 #define CORE_CTRL_ADDRESS                      0x0000
 #define PCIE_INTR_ENABLE_ADDRESS               0x0008
 #define PCIE_INTR_CAUSE_ADDRESS                        0x000c
-#define PCIE_INTR_CLR_ADDRESS                  0x0014
+#define PCIE_INTR_CLR_ADDRESS                  ar->regs->pcie_intr_clr_address
 #define SCRATCH_3_ADDRESS                      ar->regs->scratch_3_address
 #define CPU_INTR_ADDRESS                       0x0010
 
@@ -456,16 +562,18 @@ enum ath10k_hw_rate_cck {
 #define CCNT_TO_MSEC(x) ((x) / 88000)
 
 /* Firmware indications to the Host via SCRATCH_3 register. */
-#define FW_INDICATOR_ADDRESS   (SOC_CORE_BASE_ADDRESS + SCRATCH_3_ADDRESS)
+#define FW_INDICATOR_ADDRESS                   ar->regs->fw_indicator_address
 #define FW_IND_EVENT_PENDING                   1
 #define FW_IND_INITIALIZED                     2
 
 /* HOST_REG interrupt from firmware */
-#define PCIE_INTR_FIRMWARE_MASK                        0x00000400
-#define PCIE_INTR_CE_MASK_ALL                  0x0007f800
+#define PCIE_INTR_FIRMWARE_MASK                        ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL                  ar->regs->pcie_intr_ce_mask_all
 
 #define DRAM_BASE_ADDRESS                      0x00400000
 
+#define PCIE_BAR_REG_ADDRESS                   0x40030
+
 #define MISSING 0
 
 #define SYSTEM_SLEEP_OFFSET                    SOC_SYSTEM_SLEEP_OFFSET
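
Spelling out the derived 10.4 limits and the new run-time indirection above:

    /* TARGET_10_4_NUM_PEERS     = 32 stations + 16 vdevs = 48
     * TARGET_10_4_TGT_NUM_TIDS  = 48 peers * 2           = 96
     * TARGET_10_4_NUM_MSDU_DESC = 1024 + 400             = 1424
     *
     * CE_COUNT, MSI_ASSIGN_CE_MAX and RTC_STATE_V_ON now read
     * ar->hw_values at run time instead of being compile-time
     * constants, so one driver binary serves all three chips. */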
index 218b6af63447458fa81960479afeeecf9a1520e9..c9a7d5b5dffc5d367fe0ad9d84f071430eb8fb24 100644 (file)
@@ -1668,7 +1668,7 @@ static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
        return 0;
 }
 
-static int ath10k_mac_ps_vif_count(struct ath10k *ar)
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
 {
        struct ath10k_vif *arvif;
        int num = 0;
@@ -1676,7 +1676,7 @@ static int ath10k_mac_ps_vif_count(struct ath10k *ar)
        lockdep_assert_held(&ar->conf_mutex);
 
        list_for_each_entry(arvif, &ar->arvifs, list)
-               if (arvif->ps)
+               if (arvif->is_started)
                        num++;
 
        return num;
@@ -1700,7 +1700,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
 
        enable_ps = arvif->ps;
 
-       if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 &&
+       if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
            !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
                      ar->fw_features)) {
                ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
@@ -3034,38 +3034,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
 
        lockdep_assert_held(&ar->htt.tx_lock);
 
-       switch (pause_id) {
-       case WMI_TLV_TX_PAUSE_ID_MCC:
-       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
-       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
-       case WMI_TLV_TX_PAUSE_ID_AP_PS:
-       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
-               switch (action) {
-               case WMI_TLV_TX_PAUSE_ACTION_STOP:
-                       ath10k_mac_vif_tx_lock(arvif, pause_id);
-                       break;
-               case WMI_TLV_TX_PAUSE_ACTION_WAKE:
-                       ath10k_mac_vif_tx_unlock(arvif, pause_id);
-                       break;
-               default:
-                       ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
-                                   action, arvif->vdev_id);
-                       break;
-               }
+       switch (action) {
+       case WMI_TLV_TX_PAUSE_ACTION_STOP:
+               ath10k_mac_vif_tx_lock(arvif, pause_id);
+               break;
+       case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+               ath10k_mac_vif_tx_unlock(arvif, pause_id);
                break;
-       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
-       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
-       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
-       case WMI_TLV_TX_PAUSE_ID_HOST:
        default:
-               /* FIXME: Some pause_ids aren't vdev specific. Instead they
-                * target peer_id and tid. Implementing these could improve
-                * traffic scheduling fairness across multiple connected
-                * stations in AP/IBSS modes.
-                */
-               ath10k_dbg(ar, ATH10K_DBG_MAC,
-                          "mac ignoring unsupported tx pause vdev %i id %d\n",
-                          arvif->vdev_id, pause_id);
+               ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
+                           action, arvif->vdev_id);
                break;
        }
 }
@@ -3082,12 +3060,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
        struct ath10k_mac_tx_pause *arg = data;
 
+       if (arvif->vdev_id != arg->vdev_id)
+               return;
+
        ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
 }
 
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-                               enum wmi_tlv_tx_pause_id pause_id,
-                               enum wmi_tlv_tx_pause_action action)
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+                                    enum wmi_tlv_tx_pause_id pause_id,
+                                    enum wmi_tlv_tx_pause_action action)
 {
        struct ath10k_mac_tx_pause arg = {
                .vdev_id = vdev_id,
@@ -3449,14 +3430,13 @@ void __ath10k_scan_finish(struct ath10k *ar)
        case ATH10K_SCAN_IDLE:
                break;
        case ATH10K_SCAN_RUNNING:
-               if (ar->scan.is_roc)
-                       ieee80211_remain_on_channel_expired(ar->hw);
-               /* fall through */
        case ATH10K_SCAN_ABORTING:
                if (!ar->scan.is_roc)
                        ieee80211_scan_completed(ar->hw,
                                                 (ar->scan.state ==
                                                  ATH10K_SCAN_ABORTING));
+               else if (ar->scan.roc_notify)
+                       ieee80211_remain_on_channel_expired(ar->hw);
                /* fall through */
        case ATH10K_SCAN_STARTING:
                ar->scan.state = ATH10K_SCAN_IDLE;
@@ -4641,9 +4621,6 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
        arg.vdev_id = arvif->vdev_id;
        arg.scan_id = ATH10K_SCAN_ID;
 
-       if (!req->no_cck)
-               arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
-
        if (req->ie_len) {
                arg.ie_len = req->ie_len;
                memcpy(arg.ie, req->ie, arg.ie_len);
@@ -5462,6 +5439,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                ar->scan.is_roc = true;
                ar->scan.vdev_id = arvif->vdev_id;
                ar->scan.roc_freq = chan->center_freq;
+               ar->scan.roc_notify = true;
                ret = 0;
                break;
        case ATH10K_SCAN_STARTING:
@@ -5525,7 +5503,13 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
        struct ath10k *ar = hw->priv;
 
        mutex_lock(&ar->conf_mutex);
+
+       spin_lock_bh(&ar->data_lock);
+       ar->scan.roc_notify = false;
+       spin_unlock_bh(&ar->data_lock);
+
        ath10k_scan_abort(ar);
+
        mutex_unlock(&ar->conf_mutex);
 
        cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5566,7 +5550,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 {
        struct ath10k *ar = hw->priv;
        bool skip;
-       int ret;
+       long time_left;
 
        /* mac80211 doesn't care if we really xmit queued frames or not
         * we'll collect those frames either way if we stop/delete vdevs */
@@ -5578,7 +5562,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        if (ar->state == ATH10K_STATE_WEDGED)
                goto skip;
 
-       ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
+       time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
                        bool empty;
 
                        spin_lock_bh(&ar->htt.tx_lock);
@@ -5592,9 +5576,9 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        (empty || skip);
                }), ATH10K_FLUSH_TIMEOUT_HZ);
 
-       if (ret <= 0 || skip)
-               ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n",
-                           skip, ar->state, ret);
+       if (time_left == 0 || skip)
+               ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+                           skip, ar->state, time_left);
 
 skip:
        mutex_unlock(&ar->conf_mutex);
@@ -6219,6 +6203,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 
        arvif->is_started = true;
 
+       ret = ath10k_mac_vif_setup_ps(arvif);
+       if (ret) {
+               ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+                           arvif->vdev_id, ret);
+               goto err_stop;
+       }
+
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
                if (ret) {
@@ -6236,6 +6227,7 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
 err_stop:
        ath10k_vdev_stop(arvif);
        arvif->is_started = false;
+       ath10k_mac_vif_setup_ps(arvif);
 
 err:
        mutex_unlock(&ar->conf_mutex);
@@ -6565,8 +6557,11 @@ static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
        {
                .max = 2,
-               .types = BIT(NL80211_IFTYPE_STATION) |
-                        BIT(NL80211_IFTYPE_AP) |
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_AP) |
                         BIT(NL80211_IFTYPE_P2P_CLIENT) |
                         BIT(NL80211_IFTYPE_P2P_GO),
        },
@@ -6576,6 +6571,26 @@ static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
        },
 };
 
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max = 2,
+               .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_AP) |
+                        BIT(NL80211_IFTYPE_P2P_GO),
+       },
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+       },
+};
+
 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
        {
                .max = 1,
@@ -6594,7 +6609,7 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
        {
                .limits = ath10k_tlv_if_limit,
                .num_different_channels = 1,
-               .max_interfaces = 3,
+               .max_interfaces = 4,
                .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
        },
        {
@@ -6608,10 +6623,16 @@ static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
        {
                .limits = ath10k_tlv_if_limit,
-               .num_different_channels = 2,
-               .max_interfaces = 3,
+               .num_different_channels = 1,
+               .max_interfaces = 4,
                .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
        },
+       {
+               .limits = ath10k_tlv_qcs_if_limit,
+               .num_different_channels = 2,
+               .max_interfaces = 4,
+               .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+       },
        {
                .limits = ath10k_tlv_if_limit_ibss,
                .num_different_channels = 1,
@@ -6620,6 +6641,33 @@ static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
        },
 };
 
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+       {
+               .max = 1,
+               .types = BIT(NL80211_IFTYPE_STATION),
+       },
+       {
+               .max    = 16,
+               .types  = BIT(NL80211_IFTYPE_AP)
+       },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+       {
+               .limits = ath10k_10_4_if_limits,
+               .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+               .max_interfaces = 16,
+               .num_different_channels = 1,
+               .beacon_int_infra_match = true,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+               .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                                       BIT(NL80211_CHAN_WIDTH_20) |
+                                       BIT(NL80211_CHAN_WIDTH_40) |
+                                       BIT(NL80211_CHAN_WIDTH_80),
+#endif
+       },
+};
+
 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
 {
        struct ieee80211_sta_vht_cap vht_cap = {0};
@@ -6902,6 +6950,8 @@ int ath10k_mac_register(struct ath10k *ar)
                goto err_free;
        }
 
+       wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
        /*
         * on LL hardware queues are managed entirely by the FW
         * so we only advertise to mac we can do the queues thing
@@ -6941,6 +6991,11 @@ int ath10k_mac_register(struct ath10k *ar)
                ar->hw->wiphy->n_iface_combinations =
                        ARRAY_SIZE(ath10k_10x_if_comb);
                break;
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+               ar->hw->wiphy->n_iface_combinations =
+                       ARRAY_SIZE(ath10k_10_4_if_comb);
+               break;
        case ATH10K_FW_WMI_OP_VERSION_UNSET:
        case ATH10K_FW_WMI_OP_VERSION_MAX:
                WARN_ON(1);
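
Read as cfg80211 constraints, the new 10.4 combination admits, for example (illustrative cases, not taken from the diff):

    /* 16 x AP                  -> allowed (<= 16 ifaces, AP limit 16)
     * 1 x STA + 15 x AP        -> allowed
     * 2 x STA                  -> rejected (STA limit is 1)
     * any mix on two channels  -> rejected (num_different_channels = 1)
     */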
index b291f063705c3bb816f811aad450e9549aca5dfb..e3cefe4c7cfd04aa4e8c142282330068209bcb68 100644 (file)
@@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
 
 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
-void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
-                               enum wmi_tlv_tx_pause_id pause_id,
-                               enum wmi_tlv_tx_pause_action action);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+                                    enum wmi_tlv_tx_pause_id pause_id,
+                                    enum wmi_tlv_tx_pause_action action);
 
 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
                             u8 hw_rate);
index ea656e011a96e195d4513e41a482a40bab07df42..5778e5277823c53731ab7dd047f60a19816c7515 100644 (file)
@@ -59,6 +59,7 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 
 #define QCA988X_2_0_DEVICE_ID  (0x003c)
 #define QCA6174_2_1_DEVICE_ID  (0x003e)
+#define QCA99X0_2_0_DEVICE_ID  (0x0040)
 
 static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
@@ -81,7 +82,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
 
 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
 static int ath10k_pci_cold_reset(struct ath10k *ar);
-static int ath10k_pci_warm_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
 static int ath10k_pci_init_irq(struct ath10k *ar);
 static int ath10k_pci_deinit_irq(struct ath10k *ar);
@@ -90,6 +91,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
@@ -155,6 +157,38 @@ static const struct ce_attr host_ce_config_wlan[] = {
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
+
+       /* CE8: target->host pktlog */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 2048,
+               .dest_nentries = 128,
+       },
+
+       /* CE9: target autonomous qcache memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE10: target autonomous hif memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
+
+       /* CE11: target autonomous hif memcpy */
+       {
+               .flags = CE_ATTR_FLAGS,
+               .src_nentries = 0,
+               .src_sz_max = 0,
+               .dest_nentries = 0,
+       },
 };
 
 /* Target firmware's Copy Engine configuration. */
@@ -232,6 +266,38 @@ static const struct ce_pipe_config target_ce_config_wlan[] = {
        },
 
        /* CE7 used only by Host */
+       {
+               .pipenum = __cpu_to_le32(7),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(0),
+               .nbytes_max = __cpu_to_le32(0),
+               .flags = __cpu_to_le32(0),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE8: target->host pktlog */
+       {
+               .pipenum = __cpu_to_le32(8),
+               .pipedir = __cpu_to_le32(PIPEDIR_IN),
+               .nentries = __cpu_to_le32(64),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* CE9: target autonomous qcache memcpy */
+       {
+               .pipenum = __cpu_to_le32(9),
+               .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+               .nentries = __cpu_to_le32(32),
+               .nbytes_max = __cpu_to_le32(2048),
+               .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+               .reserved = __cpu_to_le32(0),
+       },
+
+       /* It is not necessary to send the target wlan configuration for CE10
+        * and CE11 as these CEs are not actively used by the target.
+        */
 };
 
 /*
@@ -479,6 +545,12 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
 
+       if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(value), ar_pci->mem_len);
+               return;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
@@ -496,6 +568,12 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
        u32 val;
        int ret;
 
+       if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+               ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+                           offset, offset + sizeof(val), ar_pci->mem_len);
+               return 0;
+       }
+
        ret = ath10k_pci_wake(ar);
        if (ret) {
                ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
@@ -678,6 +756,26 @@ static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
        ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0;
+
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                         CORE_CTRL_ADDRESS) &
+                      0x7ff) << 21;
+               break;
+       case ATH10K_HW_QCA99X0:
+               val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+               break;
+       }
+
+       val |= 0x100000 | (addr & 0xfffff);
+       return val;
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
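
To make ath10k_pci_targ_cpu_to_ce_addr() concrete, a worked example for the QCA988X/QCA6174 path; the CORE_CTRL value is invented for illustration:

    /* Suppose the CORE_CTRL read returns 0x00000410 (illustrative).
     *   bar_bits = (0x410 & 0x7ff) << 21 = 0x82000000
     * For target CPU address 0x004089ab:
     *   val = 0x82000000 | 0x100000 | (0x004089ab & 0xfffff)
     *       = 0x821089ab
     * On QCA99X0 the BAR bits come from PCIE_BAR_REG_ADDRESS instead. */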
@@ -740,8 +838,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
-               address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
-                                                    address);
+               address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
                ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
                                            0);
@@ -899,7 +996,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
         * to
         *    CE address space
         */
-       address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
+       address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
 
        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
@@ -1331,20 +1428,42 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
 {
        u32 val;
 
-       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-       val &= ~CORE_CTRL_PCIE_REG_31_MASK;
-
-       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                       CORE_CTRL_ADDRESS);
+               val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+               ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                                  CORE_CTRL_ADDRESS, val);
+               break;
+       case ATH10K_HW_QCA99X0:
+               /* TODO: Find appropriate register configuration for QCA99X0
+                * to mask irq/MSI.
+                */
+               break;
+       }
 }
 
 static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
 {
        u32 val;
 
-       val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
-       val |= CORE_CTRL_PCIE_REG_31_MASK;
-
-       ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
+       switch (ar->hw_rev) {
+       case ATH10K_HW_QCA988X:
+       case ATH10K_HW_QCA6174:
+               val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+                                       CORE_CTRL_ADDRESS);
+               val |= CORE_CTRL_PCIE_REG_31_MASK;
+               ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+                                  CORE_CTRL_ADDRESS, val);
+               break;
+       case ATH10K_HW_QCA99X0:
+               /* TODO: Find appropriate register configuration for QCA99X0
+                * to unmask irq/MSI.
+                */
+               break;
+       }
 }
 
 static void ath10k_pci_irq_disable(struct ath10k *ar)
@@ -1506,7 +1625,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
         * masked. To prevent the device from asserting the interrupt reset it
         * before proceeding with cleanup.
         */
-       ath10k_pci_warm_reset(ar);
+       ath10k_pci_safe_chip_reset(ar);
 
        ath10k_pci_irq_disable(ar);
        ath10k_pci_irq_sync(ar);
@@ -1687,6 +1806,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
 
        switch (ar_pci->pdev->device) {
        case QCA988X_2_0_DEVICE_ID:
+       case QCA99X0_2_0_DEVICE_ID:
                return 1;
        case QCA6174_2_1_DEVICE_ID:
                switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
@@ -1757,7 +1877,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 
        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                        target_ce_config_wlan,
-                                       sizeof(target_ce_config_wlan));
+                                       sizeof(struct ce_pipe_config) *
+                                       NUM_TARGET_CE_CONFIG_WLAN);
 
        if (ret != 0) {
                ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
@@ -1871,7 +1992,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar)
                }
 
                /* Last CE is Diagnostic Window */
-               if (i == CE_COUNT - 1) {
+               if (i == CE_DIAG_PIPE) {
                        ar_pci->ce_diag = pipe->ce_hdl;
                        continue;
                }
@@ -2016,6 +2137,18 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+       if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) {
+               return ath10k_pci_warm_reset(ar);
+       } else if (QCA_REV_99X0(ar)) {
+               ath10k_pci_irq_disable(ar);
+               return ath10k_pci_qca99x0_chip_reset(ar);
+       } else {
+               return -ENOTSUPP;
+       }
+}
+
 static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
 {
        int i, ret;
@@ -2122,12 +2255,38 @@ static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
        return 0;
 }
 
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+       int ret;
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+       ret = ath10k_pci_cold_reset(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+               return ret;
+       }
+
+       ret = ath10k_pci_wait_for_target_init(ar);
+       if (ret) {
+               ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+                           ret);
+               return ret;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+       return 0;
+}
+
 static int ath10k_pci_chip_reset(struct ath10k *ar)
 {
        if (QCA_REV_988X(ar))
                return ath10k_pci_qca988x_chip_reset(ar);
        else if (QCA_REV_6174(ar))
                return ath10k_pci_qca6174_chip_reset(ar);
+       else if (QCA_REV_99X0(ar))
+               return ath10k_pci_qca99x0_chip_reset(ar);
        else
                return -ENOTSUPP;
 }
@@ -2679,6 +2838,7 @@ static int ath10k_pci_claim(struct ath10k *ar)
        pci_set_master(pdev);
 
        /* Arrange for access to Target SoC registers. */
+       ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
        ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!ar_pci->mem) {
                ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
@@ -2745,6 +2905,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        case QCA6174_2_1_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA6174;
                break;
+       case QCA99X0_2_0_DEVICE_ID:
+               hw_rev = ATH10K_HW_QCA99X0;
+               break;
        default:
                WARN_ON(1);
                return -ENOTSUPP;
index d7696ddc03c42b2b2622913f9c42674f22f84039..8d364fb8f743eb6f8ed488c88d83b8adf25b6536 100644 (file)
@@ -162,6 +162,7 @@ struct ath10k_pci {
        struct device *dev;
        struct ath10k *ar;
        void __iomem *mem;
+       size_t mem_len;
 
        /*
         * Number of MSI interrupts granted, 0 --> using legacy PCI line
@@ -236,18 +237,6 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
 #define CDC_WAR_MAGIC_STR   0xceef0000
 #define CDC_WAR_DATA_CE     4
 
-/*
- * TODO: Should be a function call specific to each Target-type.
- * This convoluted macro converts from Target CPU Virtual Address Space to CE
- * Address Space. As part of this process, we conservatively fetch the current
- * PCIE_BAR. MOST of the time, this should match the upper bits of PCI space
- * for this device; but that's not guaranteed.
- */
-#define TARG_CPU_SPACE_TO_CE_SPACE(ar, pci_addr, addr)                 \
-       (((ath10k_pci_read32(ar, (SOC_CORE_BASE_ADDRESS |               \
-         CORE_CTRL_ADDRESS)) & 0x7ff) << 21) |                         \
-        0x100000 | ((addr) & 0xfffff))
-
 /* Wait up to this many Ms for a Diagnostic Access CE operation to complete */
 #define DIAG_ACCESS_CE_TIMEOUT_MS 10
 
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644 (file)
index 0000000..3ca3fae
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature the
+ * target can run a firmware binary with a smaller IRAM size by keeping some
+ * of its code segments in host memory.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+                                    struct ath10k_swap_code_seg_info *seg_info,
+                                    const void *data, size_t data_len)
+{
+       u8 *virt_addr = seg_info->virt_address[0];
+       u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+       const u8 *fw_data = data;
+       union ath10k_swap_code_seg_item *swap_item;
+       u32 length = 0;
+       u32 payload_len;
+       u32 total_payload_len = 0;
+       u32 size_left = data_len;
+
+       /* Parse the swap binary and copy its content to host-allocated
+        * memory. Each item carries an address, a length and the payload
+        * bytes; the final item holds the target write address. The address
+        * field is currently unused.
+        */
+       seg_info->target_addr = -1;
+       while (size_left >= sizeof(*swap_item)) {
+               swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+               payload_len = __le32_to_cpu(swap_item->tlv.length);
+               if ((payload_len > size_left) ||
+                   (payload_len == 0 &&
+                    size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+                       ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+                                  payload_len);
+                       return -EINVAL;
+               }
+
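+               /* A zero-length item is the tail: check the magic bytes and
+                * record the BMI write address for the segment descriptor.
+                */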
+               if (payload_len == 0) {
+                       if (memcmp(swap_item->tail.magic_signature, swap_magic,
+                                  ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+                               ath10k_err(ar, "refusing an invalid swap file\n");
+                               return -EINVAL;
+                       }
+                       seg_info->target_addr =
+                               __le32_to_cpu(swap_item->tail.bmi_write_addr);
+                       break;
+               }
+
+               memcpy(virt_addr, swap_item->tlv.data, payload_len);
+               virt_addr += payload_len;
+               length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+               size_left -= length;
+               fw_data += length;
+               total_payload_len += payload_len;
+       }
+
+       if (seg_info->target_addr == -1) {
+               ath10k_err(ar, "failed to parse invalid swap file\n");
+               return -EINVAL;
+       }
+       seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+       return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+                         struct ath10k_swap_code_seg_info *seg_info)
+{
+       u32 seg_size;
+
+       if (!seg_info)
+               return;
+
+       if (!seg_info->virt_address[0])
+               return;
+
+       seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+       dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+                         seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+       struct ath10k_swap_code_seg_info *seg_info;
+       void *virt_addr;
+       dma_addr_t paddr;
+
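+       /* Round the allocation up to a 2-byte boundary and refuse binaries
+        * larger than the maximum supported swap segment size.
+        */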
+       swap_bin_len = roundup(swap_bin_len, 2);
+       if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+               ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+                          swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+               return NULL;
+       }
+
+       seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+       if (!seg_info)
+               return NULL;
+
+       virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+                                      GFP_KERNEL);
+       if (!virt_addr) {
+               ath10k_err(ar, "failed to allocate dma coherent memory\n");
+               return NULL;
+       }
+
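+       /* Fill in the hardware segment descriptor for the single supported
+        * host-resident segment.
+        */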
+       seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+       seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+       seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+       seg_info->seg_hw_info.num_segs =
+                       __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+       seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+       seg_info->virt_address[0] = virt_addr;
+       seg_info->paddr[0] = paddr;
+
+       return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  enum ath10k_swap_code_seg_bin_type type)
+{
+       int ret;
+       struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+       switch (type) {
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
+               if (!ar->swap.firmware_swap_code_seg_info)
+                       return 0;
+
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+               seg_info = ar->swap.firmware_swap_code_seg_info;
+               break;
+       default:
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
+       case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
+               ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
+                           type);
+               return 0;
+       }
+
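+       /* Hand the segment descriptor to the target over BMI so the firmware
+        * can page code in from host memory.
+        */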
+       ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+                                     &seg_info->seg_hw_info,
+                                     sizeof(seg_info->seg_hw_info));
+       if (ret) {
+               ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+                          ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar)
+{
+       ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
+       ar->swap.firmware_codeswap_data = NULL;
+       ar->swap.firmware_codeswap_len = 0;
+       ar->swap.firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar)
+{
+       int ret;
+       struct ath10k_swap_code_seg_info *seg_info;
+
+       if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+               return 0;
+
+       seg_info = ath10k_swap_code_seg_alloc(ar,
+                                             ar->swap.firmware_codeswap_len);
+       if (!seg_info) {
+               ath10k_err(ar, "failed to allocate fw code swap segment\n");
+               return -ENOMEM;
+       }
+
+       ret = ath10k_swap_code_seg_fill(ar, seg_info,
+                                       ar->swap.firmware_codeswap_data,
+                                       ar->swap.firmware_codeswap_len);
+
+       if (ret) {
+               ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+                           ret);
+               ath10k_swap_code_seg_free(ar, seg_info);
+               return ret;
+       }
+
+       ar->swap.firmware_swap_code_seg_info = seg_info;
+
+       return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644 (file)
index 0000000..5c89952
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX       (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ    12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX           16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED     1
+
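+/* A swap binary is a sequence of TLV items (target address, payload length,
+ * payload bytes) terminated by a tail item carrying a magic signature and
+ * the BMI write address.
+ */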
+struct ath10k_swap_code_seg_tlv {
+       __le32 address;
+       __le32 length;
+       u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+       u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+       __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+       struct ath10k_swap_code_seg_tlv tlv;
+       struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+enum ath10k_swap_code_seg_bin_type {
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
+       ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
+};
+
+struct ath10k_swap_code_seg_hw_info {
+       /* Swap binary image size */
+       __le32 swap_size;
+       __le32 num_segs;
+
+       /* Swap data size */
+       __le32 size;
+       __le32 size_log2;
+       __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+       __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+       struct ath10k_swap_code_seg_hw_info seg_hw_info;
+       void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+       u32 target_addr;
+       dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+                                  enum ath10k_swap_code_seg_bin_type type);
+void ath10k_swap_code_seg_release(struct ath10k *ar);
+int ath10k_swap_code_seg_init(struct ath10k *ar);
+
+#endif
index a417aae52623de0e86b951029c0991717eaf39dc..768bef6290995b4e2b4c80c6c3e09e7b3f15893a 100644 (file)
@@ -450,4 +450,7 @@ Fw Mode/SubMode Mask
 #define QCA6174_BOARD_DATA_SZ     8192
 #define QCA6174_BOARD_EXT_DATA_SZ 0
 
+#define QCA99X0_BOARD_DATA_SZ    12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
 #endif /* __TARGADDRS_H__ */
index 826500bb2b1b247233fbf1998733c2d8c16b3fe7..6cf289158840a3ba01da0f82c81a4e07916a4b70 100644 (file)
@@ -147,9 +147,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
 static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
                                       const u8 *addr, bool expect_mapped)
 {
-       int ret;
+       long time_left;
 
-       ret = wait_event_timeout(ar->peer_mapping_wq, ({
+       time_left = wait_event_timeout(ar->peer_mapping_wq, ({
                        bool mapped;
 
                        spin_lock_bh(&ar->data_lock);
@@ -160,7 +160,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
                         test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
                }), 3*HZ);
 
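+       /* wait_event_timeout() returns 0 only on timeout and never returns
+        * a negative value, so checking for 0 is sufficient here.
+        */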
-       if (ret <= 0)
+       if (time_left == 0)
                return -ETIMEDOUT;
 
        return 0;
index 8fdba3865c960e699bacf0998fd911d1e3a231b1..4189d4a90ce0a8e6eeca9a9f2a25a1818fce3548 100644 (file)
@@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
                   "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
                   pause_id, action, vdev_map, peer_id, tid_map);
 
-       for (vdev_id = 0; vdev_map; vdev_id++) {
-               if (!(vdev_map & BIT(vdev_id)))
-                       continue;
-
-               vdev_map &= ~BIT(vdev_id);
-               ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
+       switch (pause_id) {
+       case WMI_TLV_TX_PAUSE_ID_MCC:
+       case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+       case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PS:
+       case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
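+               /* vdev-level pause: apply the action to every vdev set in
+                * the bitmap.
+                */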
+               for (vdev_id = 0; vdev_map; vdev_id++) {
+                       if (!(vdev_map & BIT(vdev_id)))
+                               continue;
+
+                       vdev_map &= ~BIT(vdev_id);
+                       ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+                                                       action);
+               }
+               break;
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+       case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+       case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+       case WMI_TLV_TX_PAUSE_ID_HOST:
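+               /* Peer- and host-level pause types are recognized but not
+                * supported; log and drop them.
+                */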
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unsupported tx pause id %d\n",
+                          pause_id);
+               break;
+       default:
+               ath10k_dbg(ar, ATH10K_DBG_MAC,
+                          "mac ignoring unknown tx pause vdev %d\n",
+                          pause_id);
+               break;
        }
 
        kfree(tb);
@@ -709,6 +731,8 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
                                         const void *ptr, void *data)
 {
        struct wmi_tlv_swba_parse *swba = data;
+       struct wmi_tim_info_arg *tim_info_arg;
+       const struct wmi_tim_info *tim_info_ev = ptr;
 
        if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
                return -EPROTO;
@@ -716,7 +740,21 @@ static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
        if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
                return -ENOBUFS;
 
-       swba->arg->tim_info[swba->n_tim++] = ptr;
+       if (__le32_to_cpu(tim_info_ev->tim_len) >
+            sizeof(tim_info_ev->tim_bitmap)) {
+               ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+               return -EPROTO;
+       }
+
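+       /* Copy the TIM fields out of the event into the parse argument. */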
+       tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+       tim_info_arg->tim_len = tim_info_ev->tim_len;
+       tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+       tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+       tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+       tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+       swba->n_tim++;
+
        return 0;
 }
 
@@ -3151,6 +3189,38 @@ static struct wmi_cmd_map wmi_tlv_cmd_map = {
        .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
        .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
        .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
@@ -3204,6 +3274,48 @@ static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
        .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
@@ -3262,6 +3374,22 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
        .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static const struct wmi_ops wmi_tlv_ops = {
index 6c046c244705fe69f23fa9207113c6cefec9a78e..0791a4336e80f4b4886b62ef89eb892bb73342ce 100644 (file)
@@ -148,6 +148,48 @@ static struct wmi_cmd_map wmi_cmd_map = {
        .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.X WMI cmd track */
@@ -271,6 +313,48 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
        .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
 };
 
 /* 10.2.4 WMI cmd track */
@@ -393,6 +477,231 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+       .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+       .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+       .init_cmdid = WMI_10_4_INIT_CMDID,
+       .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+       .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+       .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+       .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+       .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+       .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+       .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+       .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+       .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+       .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+       .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+       .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+       .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+       .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+       .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+       .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+       .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+       .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+       .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+       .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+       .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+       .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+       .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+       .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+       .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+       .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+       .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+       .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+       .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+       .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+       .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+       .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+       .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+       .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+       .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+       .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+       .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+       .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+       .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+       .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+       .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+       .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+       .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+       .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+       .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+       .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+       .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+       .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+       .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+       .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+       .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+       .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+       .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+       .roam_scan_rssi_change_threshold =
+                               WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+       .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+       .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+       .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+       .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+       .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+       .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+       .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+       .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+       .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+       .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+       .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+       .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+       .wlan_profile_set_hist_intvl_cmdid =
+                               WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       .wlan_profile_get_profile_data_cmdid =
+                               WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       .wlan_profile_enable_profile_id_cmdid =
+                               WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       .wlan_profile_list_profile_id_cmdid =
+                               WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+       .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+       .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+       .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+       .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+       .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+       .wow_enable_disable_wake_event_cmdid =
+                               WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+       .wow_hostwakeup_from_sleep_cmdid =
+                               WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+       .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+       .vdev_spectral_scan_configure_cmdid =
+                               WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       .vdev_spectral_scan_enable_cmdid =
+                               WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+       .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+       .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+       .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+       .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+       .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+       .echo_cmdid = WMI_10_4_ECHO_CMDID,
+       .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+       .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+       .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+       .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+       .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+       .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+       .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+       .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+       .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+       .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+       .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED,
+       .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED,
+       .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+       .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+       .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+       .wlan_peer_caching_add_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+       .wlan_peer_caching_evict_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+       .wlan_peer_caching_restore_peer_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+       .wlan_peer_caching_print_all_peers_info_cmdid =
+                       WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+       .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+       .peer_add_proxy_sta_entry_cmdid =
+                       WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+       .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+       .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+       .nan_cmdid = WMI_10_4_NAN_CMDID,
+       .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+       .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+       .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+       .pdev_smart_ant_set_rx_antenna_cmdid =
+                       WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+       .peer_smart_ant_set_tx_antenna_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+       .peer_smart_ant_set_train_info_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+       .peer_smart_ant_set_node_config_ops_cmdid =
+                       WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+       .pdev_set_antenna_switch_table_cmdid =
+                       WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+       .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+       .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+       .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+       .pdev_ratepwr_chainmsk_table_cmdid =
+                       WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+       .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+       .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+       .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+       .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+       .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+       .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+       .pdev_get_ani_ofdm_config_cmdid =
+                       WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+       .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+       .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+       .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+       .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+       .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+       .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+       .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+       .vdev_filter_neighbor_rx_packets_cmdid =
+                       WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+       .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+       .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+       .pdev_bss_chan_info_request_cmdid =
+                       WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
 };
 
 /* MAIN WMI VDEV param map */
@@ -452,6 +761,22 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_VDEV_PARAM_UNSUPPORTED,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 /* 10.X WMI VDEV param map */
@@ -511,6 +836,22 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -569,6 +910,97 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+       .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+       .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+       .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+       .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+       .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+       .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+       .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+       .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+       .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+       .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+       .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+       .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+       .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+       .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+       .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+       .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+       .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+       .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+       .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+       .wmi_vdev_oc_scheduler_air_time_limit =
+              WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       .wds = WMI_10_4_VDEV_PARAM_WDS,
+       .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+       .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+       .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+       .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+       .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+       .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+       .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+       .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+       .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+       .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+       .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+       .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+       .sgi = WMI_10_4_VDEV_PARAM_SGI,
+       .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+       .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+       .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+       .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+       .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+       .nss = WMI_10_4_VDEV_PARAM_NSS,
+       .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+       .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+       .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+       .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+       .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       .ap_keepalive_min_idle_inactive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_idle_inactive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       .ap_keepalive_max_unresponsive_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+       .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+       .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+       .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+       .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+       .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+       .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+       .ap_detect_out_of_sync_sleeping_sta_time_secs =
+              WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+       .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+       .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+       .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+       .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+       .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+       .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+       .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+       .early_rx_bmiss_sample_cycle =
+              WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+       .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+       .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+       .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+       .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+       .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+       .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+       .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
 };
 
 static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -621,6 +1053,48 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = {
        .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
        .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
@@ -674,6 +1148,48 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
@@ -727,6 +1243,48 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
        .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+       .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+       .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+       .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+       .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+       .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+       .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+       .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+       .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+       .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+       .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+       .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+       .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+       .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
 };
 
 /* firmware 10.2 specific mappings */
@@ -849,6 +1407,139 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
        .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+       .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+       .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+       .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+       .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+       .nan_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+       .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+       .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+       .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+       .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+       .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+       .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+       .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+       .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+       .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+       .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+       .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+       .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+       .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+       .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+       .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+       .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+       .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+       .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+       .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+       .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+       .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       .pdev_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       .vdev_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       .peer_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       .bcnflt_stats_update_period =
+                       WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+       .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+       .dcs = WMI_10_4_PDEV_PARAM_DCS,
+       .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+       .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+       .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+       .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+       .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+       .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+       .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+       .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+       .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+       .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+       .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+       .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+       .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+       .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+       .smart_antenna_default_antenna =
+                       WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+       .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+       .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+       .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+       .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+       .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+       .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+       .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+       .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+       .remove_mcast2ucast_buffer =
+                       WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+       .peer_sta_ps_statechg_enable =
+                       WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+       .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+       .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+       .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+       .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+       .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+       .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+       .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+       .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+       .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+       .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+       .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+       .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+       .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+       .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+       .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+       .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+       .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+       .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+       .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+       .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+       .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+       .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+       .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+       .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+       .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+       .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+       .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+       .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+       .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
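
Note: the 10.x pdev maps above pin every parameter those firmware branches do not expose to WMI_PDEV_PARAM_UNSUPPORTED, while the new wmi_10_4_pdev_param_map carries real WMI_10_4_PDEV_PARAM_* values. A minimal sketch of how a caller can use that sentinel before issuing a set-param command (the helper name below is illustrative, not part of the patch):

/* Illustrative only: skip pdev params the active firmware branch
 * doesn't map instead of sending a bogus id to the target.
 */
static int ath10k_example_pdev_param_supported(u32 pdev_param)
{
        if (pdev_param == WMI_PDEV_PARAM_UNSUPPORTED)
                return -EOPNOTSUPP;

        return 0;
}
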
@@ -1232,6 +1923,8 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
                        return "completed [preempted]";
                case WMI_SCAN_REASON_TIMEDOUT:
                        return "completed [timedout]";
+               case WMI_SCAN_REASON_INTERNAL_FAILURE:
+                       return "completed [internal err]";
                case WMI_SCAN_REASON_MAX:
                        break;
                }
@@ -1246,6 +1939,10 @@ ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
                return "preempted";
        case WMI_SCAN_EVENT_START_FAILED:
                return "start failed";
+       case WMI_SCAN_EVENT_RESTARTED:
+               return "restarted";
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+               return "foreign channel exit";
        default:
                return "unknown";
        }
@@ -1321,6 +2018,8 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
                break;
        case WMI_SCAN_EVENT_DEQUEUED:
        case WMI_SCAN_EVENT_PREEMPTED:
+       case WMI_SCAN_EVENT_RESTARTED:
+       case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
        default:
                break;
        }
@@ -1433,6 +2132,40 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+                                             struct sk_buff *skb,
+                                             struct wmi_mgmt_rx_ev_arg *arg)
+{
+       struct wmi_10_4_mgmt_rx_event *ev;
+       struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+       size_t pull_len;
+       u32 msdu_len;
+
+       ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+       ev_hdr = &ev->hdr;
+       pull_len = sizeof(*ev);
+
+       if (skb->len < pull_len)
+               return -EPROTO;
+
+       skb_pull(skb, pull_len);
+       arg->channel = ev_hdr->channel;
+       arg->buf_len = ev_hdr->buf_len;
+       arg->status = ev_hdr->status;
+       arg->snr = ev_hdr->snr;
+       arg->phy_mode = ev_hdr->phy_mode;
+       arg->rate = ev_hdr->rate;
+
+       msdu_len = __le32_to_cpu(arg->buf_len);
+       if (skb->len < msdu_len)
+               return -EPROTO;
+
+       /* Make sure bytes added for padding are removed. */
+       skb_trim(skb, msdu_len);
+
+       return 0;
+}
+
 int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_mgmt_rx_ev_arg arg = {};
@@ -1593,6 +2326,29 @@ static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+                                             struct sk_buff *skb,
+                                             struct wmi_ch_info_ev_arg *arg)
+{
+       struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+       if (skb->len < sizeof(*ev))
+               return -EPROTO;
+
+       skb_pull(skb, sizeof(*ev));
+       arg->err_code = ev->err_code;
+       arg->freq = ev->freq;
+       arg->cmd_flags = ev->cmd_flags;
+       arg->noise_floor = ev->noise_floor;
+       arg->rx_clear_count = ev->rx_clear_count;
+       arg->cycle_count = ev->cycle_count;
+       arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+       arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+       arg->rx_frame_count = ev->rx_frame_count;
+
+       return 0;
+}
+
 void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_ch_info_ev_arg arg = {};
@@ -2149,33 +2905,42 @@ exit:
 static void ath10k_wmi_update_tim(struct ath10k *ar,
                                  struct ath10k_vif *arvif,
                                  struct sk_buff *bcn,
-                                 const struct wmi_tim_info *tim_info)
+                                 const struct wmi_tim_info_arg *tim_info)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
        struct ieee80211_tim_ie *tim;
        u8 *ies, *ie;
        u8 ie_len, pvm_len;
        __le32 t;
-       u32 v;
+       u32 v, tim_len;
+
+       /* When FW reports 0 in tim_len, ensure at least the first byte
+        * in tim_bitmap is considered for pvm calculation.
+        */
+       tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
 
        /* if next SWBA has no tim_changed the tim_bitmap is garbage.
         * we must copy the bitmap upon change and reuse it later */
        if (__le32_to_cpu(tim_info->tim_changed)) {
                int i;
 
-               BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
-                            sizeof(tim_info->tim_bitmap));
+               if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+                       ath10k_warn(ar, "SWBA TIM field is too big (%u), truncating it to %zu",
+                                   tim_len, sizeof(arvif->u.ap.tim_bitmap));
+                       tim_len = sizeof(arvif->u.ap.tim_bitmap);
+               }
 
-               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
+               for (i = 0; i < tim_len; i++) {
                        t = tim_info->tim_bitmap[i / 4];
                        v = __le32_to_cpu(t);
                        arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
                }
 
-               /* FW reports either length 0 or 16
-                * so we calculate this on our own */
+               /* FW reports either length 0 or a length based on the max
+                * supported stations, so we calculate this on our own.
+                */
                arvif->u.ap.tim_len = 0;
-               for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
+               for (i = 0; i < tim_len; i++)
                        if (arvif->u.ap.tim_bitmap[i])
                                arvif->u.ap.tim_len = i;
 
@@ -2199,7 +2964,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
        pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
 
        if (pvm_len < arvif->u.ap.tim_len) {
-               int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
+               int expand_size = tim_len - pvm_len;
                int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
                void *next_ie = ie + 2 + ie_len;
 
@@ -2214,7 +2979,7 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
                }
        }
 
-       if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
+       if (pvm_len > tim_len) {
                ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
                return;
        }
@@ -2278,7 +3043,21 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
                if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
                        break;
 
-               arg->tim_info[i] = &ev->bcn_info[i].tim_info;
+               if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+                    sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+                       ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+                       return -EPROTO;
+               }
+
+               arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+               arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+               arg->tim_info[i].tim_bitmap =
+                               ev->bcn_info[i].tim_info.tim_bitmap;
+               arg->tim_info[i].tim_changed =
+                               ev->bcn_info[i].tim_info.tim_changed;
+               arg->tim_info[i].tim_num_ps_pending =
+                               ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
                arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
                i++;
        }
@@ -2286,12 +3065,69 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
        return 0;
 }
 
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+                                          struct sk_buff *skb,
+                                          struct wmi_swba_ev_arg *arg)
+{
+       struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+       u32 map, tim_len;
+       size_t i;
+
+       if (skb->len < sizeof(*ev))
+               return -EPROTO;
+
+       skb_pull(skb, sizeof(*ev));
+       arg->vdev_map = ev->vdev_map;
+
+       for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+               if (!(map & BIT(0)))
+                       continue;
+
+               /* If this happens, the firmware has changed and ath10k should
+                * update the max size of the tim_info array.
+                */
+               if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+                       break;
+
+               if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+                     sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+                       ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+                       return -EPROTO;
+               }
+
+               tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+               if (tim_len) {
+                       /* Exclude 4 byte guard length */
+                       tim_len -= 4;
+                       arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+               } else {
+                       arg->tim_info[i].tim_len = 0;
+               }
+
+               arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+               arg->tim_info[i].tim_bitmap =
+                               ev->bcn_info[i].tim_info.tim_bitmap;
+               arg->tim_info[i].tim_changed =
+                               ev->bcn_info[i].tim_info.tim_changed;
+               arg->tim_info[i].tim_num_ps_pending =
+                               ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+               /* 10.4 firmware doesn't have p2p support, so the notice of
+                * absence info can be ignored for now.
+                */
+
+               i++;
+       }
+
+       return 0;
+}
+
 void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
 {
        struct wmi_swba_ev_arg arg = {};
        u32 map;
        int i = -1;
-       const struct wmi_tim_info *tim_info;
+       const struct wmi_tim_info_arg *tim_info;
        const struct wmi_p2p_noa_info *noa_info;
        struct ath10k_vif *arvif;
        struct sk_buff *bcn;
@@ -2320,7 +3156,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                        break;
                }
 
-               tim_info = arg.tim_info[i];
+               tim_info = &arg.tim_info[i];
                noa_info = arg.noa_info[i];
 
                ath10k_dbg(ar, ATH10K_DBG_MGMT,
@@ -2335,6 +3171,10 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
                           __le32_to_cpu(tim_info->tim_bitmap[1]),
                           __le32_to_cpu(tim_info->tim_bitmap[0]));
 
+               /* TODO: Only the first 4 words of tim_bitmap are dumped.
+                * Extend the debug code to dump the full tim_bitmap.
+                */
+
                arvif = ath10k_get_arvif(ar, vdev_id);
                if (arvif == NULL) {
                        ath10k_warn(ar, "no vif for vdev_id %d found\n",
@@ -3075,10 +3915,10 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
        if (ar->fw_api == 1 && ar->fw_version_build > 636)
                set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
 
-       if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+       if (ar->num_rf_chains > ar->max_spatial_stream) {
                ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
-                           ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
-               ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+                           ar->num_rf_chains, ar->max_spatial_stream);
+               ar->num_rf_chains = ar->max_spatial_stream;
        }
 
        ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
@@ -3101,20 +3941,39 @@ void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
                return;
        }
 
+       if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+               ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+                                   TARGET_10_4_NUM_VDEVS;
+               ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+                                      TARGET_10_4_NUM_VDEVS;
+               ar->num_tids = ar->num_active_peers * 2;
+               ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+       }
+
+       /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+        * and WMI_SERVICE_IRAM_TIDS, etc.
+        */
+
        for (i = 0; i < num_mem_reqs; ++i) {
                req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
                num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
                unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
                num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
 
-               if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
+               if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+                       if (ar->num_active_peers)
+                               num_units = ar->num_active_peers + 1;
+                       else
+                               num_units = ar->max_num_peers + 1;
+               } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
                        /* number of units to allocate is number of
                         * peers, 1 extra for self peer on target */
                        /* this needs to be tied, host and target
                         * can get out of sync */
-                       num_units = TARGET_10X_NUM_PEERS + 1;
-               else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
-                       num_units = TARGET_10X_NUM_VDEVS + 1;
+                       num_units = ar->max_num_peers + 1;
+               } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+                       num_units = ar->max_num_vdevs + 1;
+               }
 
                ath10k_dbg(ar, ATH10K_DBG_WMI,
                           "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
@@ -3576,6 +4435,73 @@ out:
        dev_kfree_skb(skb);
 }
 
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+       struct wmi_cmd_hdr *cmd_hdr;
+       enum wmi_10_4_event_id id;
+
+       cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+       id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+       if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+               goto out;
+
+       trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+       switch (id) {
+       case WMI_10_4_MGMT_RX_EVENTID:
+               ath10k_wmi_event_mgmt_rx(ar, skb);
+               /* mgmt_rx() owns the skb now! */
+               return;
+       case WMI_10_4_ECHO_EVENTID:
+               ath10k_wmi_event_echo(ar, skb);
+               break;
+       case WMI_10_4_DEBUG_MESG_EVENTID:
+               ath10k_wmi_event_debug_mesg(ar, skb);
+               break;
+       case WMI_10_4_SERVICE_READY_EVENTID:
+               ath10k_wmi_event_service_ready(ar, skb);
+               break;
+       case WMI_10_4_SCAN_EVENTID:
+               ath10k_wmi_event_scan(ar, skb);
+               break;
+       case WMI_10_4_CHAN_INFO_EVENTID:
+               ath10k_wmi_event_chan_info(ar, skb);
+               break;
+       case WMI_10_4_READY_EVENTID:
+               ath10k_wmi_event_ready(ar, skb);
+               break;
+       case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+               ath10k_wmi_event_peer_sta_kickout(ar, skb);
+               break;
+       case WMI_10_4_HOST_SWBA_EVENTID:
+               ath10k_wmi_event_host_swba(ar, skb);
+               break;
+       case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+               ath10k_wmi_event_tbttoffset_update(ar, skb);
+               break;
+       case WMI_10_4_DEBUG_PRINT_EVENTID:
+               ath10k_wmi_event_debug_print(ar, skb);
+               break;
+       case WMI_10_4_VDEV_START_RESP_EVENTID:
+               ath10k_wmi_event_vdev_start_resp(ar, skb);
+               break;
+       case WMI_10_4_VDEV_STOPPED_EVENTID:
+               ath10k_wmi_event_vdev_stopped(ar, skb);
+               break;
+       case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+               ath10k_dbg(ar, ATH10K_DBG_WMI,
+                          "received event id %d not implemented\n", id);
+               break;
+       default:
+               ath10k_warn(ar, "Unknown eventid: %d\n", id);
+               break;
+       }
+
+out:
+       dev_kfree_skb(skb);
+}
+
 static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
        int ret;
@@ -3950,6 +4876,88 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
        return buf;
 }
 
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+       struct wmi_init_cmd_10_4 *cmd;
+       struct sk_buff *buf;
+       struct wmi_resource_config_10_4 config = {};
+       u32 len;
+
+       config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+       config.num_peers = __cpu_to_le32(ar->max_num_peers);
+       config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+       config.num_tids = __cpu_to_le32(ar->num_tids);
+
+       config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+       config.num_offload_reorder_buffs =
+                       __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+       config.num_peer_keys  = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+       config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+       config.tx_chain_mask  = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK);
+       config.rx_chain_mask  = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK);
+
+       config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+       config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+       config.rx_decap_mode        = __cpu_to_le32(TARGET_10_4_RX_DECAP_MODE);
+       config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+       config.bmiss_offload_max_vdev =
+                       __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+       config.roam_offload_max_vdev  =
+                       __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+       config.roam_offload_max_ap_profiles =
+                       __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+       config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+       config.num_mcast_table_elems =
+                       __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+       config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+       config.tx_dbg_log_size  = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+       config.num_wds_entries  = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+       config.dma_burst_size   = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+       config.mac_aggr_delim   = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+       config.rx_skip_defrag_timeout_dup_detection_check =
+         __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+       config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+       config.gtk_offload_max_vdev =
+                       __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+       config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC);
+       config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+       config.max_peer_ext_stats =
+                       __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+       config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+       config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+       config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+       config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+       config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+       config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+       config.tt_support =
+                       __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+       config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+       config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+       config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+       len = sizeof(*cmd) +
+             (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+       buf = ath10k_wmi_alloc_skb(ar, len);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
+
+       cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+       memcpy(&cmd->resource_config, &config, sizeof(config));
+       ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+       ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+       return buf;
+}
+
 int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
 {
        if (arg->ie_len && !arg->ie)
@@ -4172,7 +5180,6 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
                | WMI_SCAN_EVENT_BSS_CHANNEL
                | WMI_SCAN_EVENT_FOREIGN_CHANNEL
                | WMI_SCAN_EVENT_DEQUEUED;
-       arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
        arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
        arg->n_bssids = 1;
        arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
@@ -5412,9 +6419,64 @@ static const struct wmi_ops wmi_10_2_4_ops = {
        /* .gen_adaptive_qcs not implemented */
 };
 
+static const struct wmi_ops wmi_10_4_ops = {
+       .rx = ath10k_wmi_10_4_op_rx,
+       .map_svc = wmi_10_4_svc_map,
+
+       .pull_scan = ath10k_wmi_op_pull_scan_ev,
+       .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+       .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+       .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+       .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+       .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+       .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+       .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+
+       .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+       .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+       .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+       .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+       .gen_init = ath10k_wmi_10_4_op_gen_init,
+       .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+       .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+       .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+       .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+       .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+       .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+       .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+       .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+       .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+       .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+       .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+       .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+       .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+       .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+       .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+       .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+       .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+       .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+       .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+       .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+       .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+       .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+       .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+       .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+       .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+       .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+
+       /* shared with 10.2 */
+       .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+};
+
 int ath10k_wmi_attach(struct ath10k *ar)
 {
        switch (ar->wmi.op_version) {
+       case ATH10K_FW_WMI_OP_VERSION_10_4:
+               ar->wmi.ops = &wmi_10_4_ops;
+               ar->wmi.cmd = &wmi_10_4_cmd_map;
+               ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+               ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+               break;
        case ATH10K_FW_WMI_OP_VERSION_10_2_4:
                ar->wmi.cmd = &wmi_10_2_4_cmd_map;
                ar->wmi.ops = &wmi_10_2_4_ops;
index cf44a3d080a38c7a58b454d7e82419e98f1733a4..0d4efc9c5796432cec96bacf70c83a4f94d8e7dc 100644 (file)
@@ -150,6 +150,12 @@ enum wmi_service {
        WMI_SERVICE_SAP_AUTH_OFFLOAD,
        WMI_SERVICE_ATF,
        WMI_SERVICE_COEX_GPIO,
+       WMI_SERVICE_ENHANCED_PROXY_STA,
+       WMI_SERVICE_TT,
+       WMI_SERVICE_PEER_CACHING,
+       WMI_SERVICE_AUX_SPECTRAL_INTF,
+       WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+       WMI_SERVICE_BSS_CHANNEL_INFO_64,
 
        /* keep last */
        WMI_SERVICE_MAX,
@@ -218,6 +224,51 @@ enum wmi_main_service {
        WMI_MAIN_SERVICE_TX_ENCAP,
 };
 
+enum wmi_10_4_service {
+       WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+       WMI_10_4_SERVICE_SCAN_OFFLOAD,
+       WMI_10_4_SERVICE_ROAM_OFFLOAD,
+       WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+       WMI_10_4_SERVICE_STA_PWRSAVE,
+       WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+       WMI_10_4_SERVICE_AP_UAPSD,
+       WMI_10_4_SERVICE_AP_DFS,
+       WMI_10_4_SERVICE_11AC,
+       WMI_10_4_SERVICE_BLOCKACK,
+       WMI_10_4_SERVICE_PHYERR,
+       WMI_10_4_SERVICE_BCN_FILTER,
+       WMI_10_4_SERVICE_RTT,
+       WMI_10_4_SERVICE_RATECTRL,
+       WMI_10_4_SERVICE_WOW,
+       WMI_10_4_SERVICE_RATECTRL_CACHE,
+       WMI_10_4_SERVICE_IRAM_TIDS,
+       WMI_10_4_SERVICE_BURST,
+       WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+       WMI_10_4_SERVICE_GTK_OFFLOAD,
+       WMI_10_4_SERVICE_SCAN_SCH,
+       WMI_10_4_SERVICE_CSA_OFFLOAD,
+       WMI_10_4_SERVICE_CHATTER,
+       WMI_10_4_SERVICE_COEX_FREQAVOID,
+       WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+       WMI_10_4_SERVICE_FORCE_FW_HANG,
+       WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+       WMI_10_4_SERVICE_GPIO,
+       WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+       WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+       WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+       WMI_10_4_SERVICE_TX_ENCAP,
+       WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+       WMI_10_4_SERVICE_EARLY_RX,
+       WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+       WMI_10_4_SERVICE_TT,
+       WMI_10_4_SERVICE_ATF,
+       WMI_10_4_SERVICE_PEER_CACHING,
+       WMI_10_4_SERVICE_COEX_GPIO,
+       WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+       WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+       WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+};
+
 static inline char *wmi_service_name(int service_id)
 {
 #define SVCSTR(x) case x: return #x
@@ -299,6 +350,12 @@ static inline char *wmi_service_name(int service_id)
        SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
        SVCSTR(WMI_SERVICE_ATF);
        SVCSTR(WMI_SERVICE_COEX_GPIO);
+       SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+       SVCSTR(WMI_SERVICE_TT);
+       SVCSTR(WMI_SERVICE_PEER_CACHING);
+       SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+       SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+       SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
        default:
                return NULL;
        }
@@ -437,6 +494,95 @@ static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
               WMI_SERVICE_TX_ENCAP, len);
 }
 
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+                                   size_t len)
+{
+       SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+              WMI_SERVICE_BEACON_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+              WMI_SERVICE_SCAN_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+              WMI_SERVICE_ROAM_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+              WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+              WMI_SERVICE_STA_PWRSAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+              WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+              WMI_SERVICE_AP_UAPSD, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+              WMI_SERVICE_AP_DFS, len);
+       SVCMAP(WMI_10_4_SERVICE_11AC,
+              WMI_SERVICE_11AC, len);
+       SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+              WMI_SERVICE_BLOCKACK, len);
+       SVCMAP(WMI_10_4_SERVICE_PHYERR,
+              WMI_SERVICE_PHYERR, len);
+       SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+              WMI_SERVICE_BCN_FILTER, len);
+       SVCMAP(WMI_10_4_SERVICE_RTT,
+              WMI_SERVICE_RTT, len);
+       SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+              WMI_SERVICE_RATECTRL, len);
+       SVCMAP(WMI_10_4_SERVICE_WOW,
+              WMI_SERVICE_WOW, len);
+       SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+              WMI_SERVICE_RATECTRL_CACHE, len);
+       SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+              WMI_SERVICE_IRAM_TIDS, len);
+       SVCMAP(WMI_10_4_SERVICE_BURST,
+              WMI_SERVICE_BURST, len);
+       SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+              WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+       SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+              WMI_SERVICE_GTK_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+              WMI_SERVICE_SCAN_SCH, len);
+       SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+              WMI_SERVICE_CSA_OFFLOAD, len);
+       SVCMAP(WMI_10_4_SERVICE_CHATTER,
+              WMI_SERVICE_CHATTER, len);
+       SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+              WMI_SERVICE_COEX_FREQAVOID, len);
+       SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+              WMI_SERVICE_PACKET_POWER_SAVE, len);
+       SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+              WMI_SERVICE_FORCE_FW_HANG, len);
+       SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+              WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+       SVCMAP(WMI_10_4_SERVICE_GPIO,
+              WMI_SERVICE_GPIO, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+              WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+              WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+       SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+              WMI_SERVICE_STA_KEEP_ALIVE, len);
+       SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+              WMI_SERVICE_TX_ENCAP, len);
+       SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+              WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+       SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+              WMI_SERVICE_EARLY_RX, len);
+       SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+              WMI_SERVICE_ENHANCED_PROXY_STA, len);
+       SVCMAP(WMI_10_4_SERVICE_TT,
+              WMI_SERVICE_TT, len);
+       SVCMAP(WMI_10_4_SERVICE_ATF,
+              WMI_SERVICE_ATF, len);
+       SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+              WMI_SERVICE_PEER_CACHING, len);
+       SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+              WMI_SERVICE_COEX_GPIO, len);
+       SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+              WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+       SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+              WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+       SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+              WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+}
+
 #undef SVCMAP
 
 /* 2 word representation of MAC addr */
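
Each entry in the wmi_10_4_svc_map() helper just above translates one 10.4-specific service bit index into the driver's unified wmi_service id. Conceptually, every SVCMAP(in_bit, out_bit, len) line expands to something like the simplified stand-in below; the real macro earlier in this header is the authoritative version, and this helper exists only to illustrate the mapping (it assumes len counts __le32 words):

/* Simplified illustration of one SVCMAP() expansion, not the real macro. */
static inline void example_svc_map_one(const __le32 *in, unsigned long *out,
                                       size_t len, int in_bit, int out_bit)
{
        if (in_bit < (int)(len * 32) &&
            (__le32_to_cpu(in[in_bit / 32]) & BIT(in_bit % 32)))
                __set_bit(out_bit, out);
}
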
@@ -565,6 +711,48 @@ struct wmi_cmd_map {
        u32 tdls_set_state_cmdid;
        u32 tdls_peer_update_cmdid;
        u32 adaptive_qcs_cmdid;
+       u32 scan_update_request_cmdid;
+       u32 vdev_standby_response_cmdid;
+       u32 vdev_resume_response_cmdid;
+       u32 wlan_peer_caching_add_peer_cmdid;
+       u32 wlan_peer_caching_evict_peer_cmdid;
+       u32 wlan_peer_caching_restore_peer_cmdid;
+       u32 wlan_peer_caching_print_all_peers_info_cmdid;
+       u32 peer_update_wds_entry_cmdid;
+       u32 peer_add_proxy_sta_entry_cmdid;
+       u32 rtt_keepalive_cmdid;
+       u32 oem_req_cmdid;
+       u32 nan_cmdid;
+       u32 vdev_ratemask_cmdid;
+       u32 qboost_cfg_cmdid;
+       u32 pdev_smart_ant_enable_cmdid;
+       u32 pdev_smart_ant_set_rx_antenna_cmdid;
+       u32 peer_smart_ant_set_tx_antenna_cmdid;
+       u32 peer_smart_ant_set_train_info_cmdid;
+       u32 peer_smart_ant_set_node_config_ops_cmdid;
+       u32 pdev_set_antenna_switch_table_cmdid;
+       u32 pdev_set_ctl_table_cmdid;
+       u32 pdev_set_mimogain_table_cmdid;
+       u32 pdev_ratepwr_table_cmdid;
+       u32 pdev_ratepwr_chainmsk_table_cmdid;
+       u32 pdev_fips_cmdid;
+       u32 tt_set_conf_cmdid;
+       u32 fwtest_cmdid;
+       u32 vdev_atf_request_cmdid;
+       u32 peer_atf_request_cmdid;
+       u32 pdev_get_ani_cck_config_cmdid;
+       u32 pdev_get_ani_ofdm_config_cmdid;
+       u32 pdev_reserve_ast_entry_cmdid;
+       u32 pdev_get_nfcal_power_cmdid;
+       u32 pdev_get_tpc_cmdid;
+       u32 pdev_get_ast_info_cmdid;
+       u32 vdev_set_dscp_tid_map_cmdid;
+       u32 pdev_get_info_cmdid;
+       u32 vdev_get_info_cmdid;
+       u32 vdev_filter_neighbor_rx_packets_cmdid;
+       u32 mu_cal_start_cmdid;
+       u32 set_cca_params_cmdid;
+       u32 pdev_bss_chan_info_request_cmdid;
 };
 
 /*
@@ -1220,6 +1408,216 @@ enum wmi_10_2_event_id {
        WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
 };
 
+enum wmi_10_4_cmd_id {
+       WMI_10_4_START_CMDID = 0x9000,
+       WMI_10_4_END_CMDID = 0x9FFF,
+       WMI_10_4_INIT_CMDID,
+       WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+       WMI_10_4_STOP_SCAN_CMDID,
+       WMI_10_4_SCAN_CHAN_LIST_CMDID,
+       WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+       WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+       WMI_10_4_ECHO_CMDID,
+       WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+       WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+       WMI_10_4_PDEV_SET_PARAM_CMDID,
+       WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+       WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+       WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+       WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+       WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+       WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+       WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+       WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+       WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+       WMI_10_4_VDEV_CREATE_CMDID,
+       WMI_10_4_VDEV_DELETE_CMDID,
+       WMI_10_4_VDEV_START_REQUEST_CMDID,
+       WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+       WMI_10_4_VDEV_UP_CMDID,
+       WMI_10_4_VDEV_STOP_CMDID,
+       WMI_10_4_VDEV_DOWN_CMDID,
+       WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+       WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+       WMI_10_4_VDEV_SET_PARAM_CMDID,
+       WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+       WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+       WMI_10_4_PEER_CREATE_CMDID,
+       WMI_10_4_PEER_DELETE_CMDID,
+       WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+       WMI_10_4_PEER_SET_PARAM_CMDID,
+       WMI_10_4_PEER_ASSOC_CMDID,
+       WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+       WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+       WMI_10_4_PEER_MCAST_GROUP_CMDID,
+       WMI_10_4_BCN_TX_CMDID,
+       WMI_10_4_PDEV_SEND_BCN_CMDID,
+       WMI_10_4_BCN_PRB_TMPL_CMDID,
+       WMI_10_4_BCN_FILTER_RX_CMDID,
+       WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+       WMI_10_4_MGMT_TX_CMDID,
+       WMI_10_4_PRB_TMPL_CMDID,
+       WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+       WMI_10_4_ADDBA_SEND_CMDID,
+       WMI_10_4_ADDBA_STATUS_CMDID,
+       WMI_10_4_DELBA_SEND_CMDID,
+       WMI_10_4_ADDBA_SET_RESP_CMDID,
+       WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+       WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+       WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+       WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+       WMI_10_4_DBGLOG_CFG_CMDID,
+       WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+       WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+       WMI_10_4_PDEV_QVIT_CMDID,
+       WMI_10_4_ROAM_SCAN_MODE,
+       WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+       WMI_10_4_ROAM_SCAN_PERIOD,
+       WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+       WMI_10_4_ROAM_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+       WMI_10_4_OFL_SCAN_PERIOD,
+       WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+       WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+       WMI_10_4_P2P_GO_SET_BEACON_IE,
+       WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+       WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+       WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+       WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+       WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+       WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+       WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+       WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+       WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+       WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+       WMI_10_4_PDEV_SUSPEND_CMDID,
+       WMI_10_4_PDEV_RESUME_CMDID,
+       WMI_10_4_ADD_BCN_FILTER_CMDID,
+       WMI_10_4_RMV_BCN_FILTER_CMDID,
+       WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+       WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+       WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+       WMI_10_4_WOW_ENABLE_CMDID,
+       WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+       WMI_10_4_RTT_MEASREQ_CMDID,
+       WMI_10_4_RTT_TSF_CMDID,
+       WMI_10_4_RTT_KEEPALIVE_CMDID,
+       WMI_10_4_OEM_REQ_CMDID,
+       WMI_10_4_NAN_CMDID,
+       WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+       WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+       WMI_10_4_REQUEST_STATS_CMDID,
+       WMI_10_4_GPIO_CONFIG_CMDID,
+       WMI_10_4_GPIO_OUTPUT_CMDID,
+       WMI_10_4_VDEV_RATEMASK_CMDID,
+       WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+       WMI_10_4_GTK_OFFLOAD_CMDID,
+       WMI_10_4_QBOOST_CFG_CMDID,
+       WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+       WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+       WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+       WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+       WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+       WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+       WMI_10_4_FORCE_FW_HANG_CMDID,
+       WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+       WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+       WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+       WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+       WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+       WMI_10_4_PDEV_FIPS_CMDID,
+       WMI_10_4_TT_SET_CONF_CMDID,
+       WMI_10_4_FWTEST_CMDID,
+       WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+       WMI_10_4_PEER_ATF_REQUEST_CMDID,
+       WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+       WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+       WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+       WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+       WMI_10_4_PDEV_GET_TPC_CMDID,
+       WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+       WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+       WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+       WMI_10_4_PDEV_GET_INFO_CMDID,
+       WMI_10_4_VDEV_GET_INFO_CMDID,
+       WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+       WMI_10_4_MU_CAL_START_CMDID,
+       WMI_10_4_SET_CCA_PARAMS_CMDID,
+       WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+       WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+       WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+       WMI_10_4_READY_EVENTID,
+       WMI_10_4_DEBUG_MESG_EVENTID,
+       WMI_10_4_START_EVENTID = 0x9000,
+       WMI_10_4_END_EVENTID = 0x9FFF,
+       WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+       WMI_10_4_ECHO_EVENTID,
+       WMI_10_4_UPDATE_STATS_EVENTID,
+       WMI_10_4_INST_RSSI_STATS_EVENTID,
+       WMI_10_4_VDEV_START_RESP_EVENTID,
+       WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+       WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+       WMI_10_4_VDEV_STOPPED_EVENTID,
+       WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+       WMI_10_4_HOST_SWBA_EVENTID,
+       WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+       WMI_10_4_MGMT_RX_EVENTID,
+       WMI_10_4_CHAN_INFO_EVENTID,
+       WMI_10_4_PHYERR_EVENTID,
+       WMI_10_4_ROAM_EVENTID,
+       WMI_10_4_PROFILE_MATCH,
+       WMI_10_4_DEBUG_PRINT_EVENTID,
+       WMI_10_4_PDEV_QVIT_EVENTID,
+       WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+       WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+       WMI_10_4_RTT_KEEPALIVE_EVENTID,
+       WMI_10_4_OEM_CAPABILITY_EVENTID,
+       WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+       WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+       WMI_10_4_NAN_EVENTID,
+       WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+       WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+       WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+       WMI_10_4_DCS_INTERFERENCE_EVENTID,
+       WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+       WMI_10_4_CSA_HANDLING_EVENTID,
+       WMI_10_4_GPIO_INPUT_EVENTID,
+       WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+       WMI_10_4_GENERIC_BUFFER_EVENTID,
+       WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+       WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+       WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+       WMI_10_4_WDS_PEER_EVENTID,
+       WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+       WMI_10_4_PDEV_FIPS_EVENTID,
+       WMI_10_4_TT_STATS_EVENTID,
+       WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+       WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+       WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+       WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+       WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+       WMI_10_4_PDEV_TPC_EVENTID,
+       WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+       WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+       WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+       WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+       WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
 enum wmi_phy_mode {
        MODE_11A        = 0,   /* 11a Mode */
        MODE_11G        = 1,   /* 11b/g Mode */
@@ -1349,7 +1747,8 @@ enum wmi_channel_change_cause {
 /* Indicate reason for channel switch */
 #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
 
-#define WMI_MAX_SPATIAL_STREAM   3
+#define WMI_MAX_SPATIAL_STREAM        3 /* default max ss */
+#define WMI_10_4_MAX_SPATIAL_STREAM   4
 
 /* HT Capabilities*/
 #define WMI_HT_CAP_ENABLED                0x0001   /* HT Enabled/ disabled */
@@ -1979,8 +2378,224 @@ struct wmi_resource_config_10_2 {
        __le32 feature_mask;
 } __packed;
 
-#define NUM_UNITS_IS_NUM_VDEVS   0x1
-#define NUM_UNITS_IS_NUM_PEERS   0x2
+#define NUM_UNITS_IS_NUM_VDEVS         BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS         BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS  BIT(2)
+
+struct wmi_resource_config_10_4 {
+       /* Number of virtual devices (VAPs) to support */
+       __le32 num_vdevs;
+
+       /* Number of peer nodes to support */
+       __le32 num_peers;
+
+       /* Number of active peer nodes to support */
+       __le32 num_active_peers;
+
+       /* In offload mode, the target supports features like WOW, chatter and
+        * other protocol offloads. To support them, functionality such as rx
+        * reorder buffering and PN checking must be done on the target. This
+        * determines the maximum number of peers the target supports in
+        * offload mode.
+        */
+       __le32 num_offload_peers;
+
+       /* Number of reorder buffers available for doing target-based rx
+        * reorder buffering.
+        */
+       __le32 num_offload_reorder_buffs;
+
+       /* Number of keys per peer */
+       __le32 num_peer_keys;
+
+       /* Total number of TX/RX data TIDs */
+       __le32 num_tids;
+
+       /* Max skid for resolving hash collisions.
+        * The address search table is sparse, so that if two MAC addresses
+        * result in the same hash value, the second of these conflicting
+        * entries can slide to the next index in the address search table,
+        * and use it, if it is unoccupied.  This ast_skid_limit parameter
+        * specifies the upper bound on how many subsequent indices to search
+        * over to find an unoccupied space.
+        */
+       __le32 ast_skid_limit;
+
+       /* The nominal chain mask for transmit.
+        * The chain mask may be modified dynamically, e.g. to operate AP tx
+        * with a reduced number of chains if no clients are associated.
+        * This configuration parameter specifies the nominal chain-mask that
+        * should be used when not operating with a reduced set of tx chains.
+        */
+       __le32 tx_chain_mask;
+
+       /* The nominal chain mask for receive.
+        * The chain mask may be modified dynamically, e.g. for a client to use
+        * a reduced number of chains for receive if the traffic to the client
+        * is low enough that it doesn't require downlink MIMO or antenna
+        * diversity. This configuration parameter specifies the nominal
+        * chain-mask that should be used when not operating with a reduced
+        * set of rx chains.
+        */
+       __le32 rx_chain_mask;
+
+       /* What rx reorder timeout (ms) to use for the AC.
+        * Each WMM access class (voice, video, best-effort, background) will
+        * have its own timeout value to dictate how long to wait for missing
+        * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+        * been received. This parameter specifies the timeout in milliseconds
+        * for each class.
+        */
+       __le32 rx_timeout_pri[4];
+
+       /* What mode the rx should decap packets to.
+        * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+        * This setting also determines the default TX behavior; however, TX
+        * behavior can be modified on a per-VAP basis during VAP init.
+        */
+       __le32 rx_decap_mode;
+
+       __le32 scan_max_pending_req;
+
+       __le32 bmiss_offload_max_vdev;
+
+       __le32 roam_offload_max_vdev;
+
+       __le32 roam_offload_max_ap_profiles;
+
+       /* How many groups to use for mcast->ucast conversion.
+        * The target's WAL maintains a table to hold information regarding
+        * which peers belong to a given multicast group, so that if
+        * multicast->unicast conversion is enabled, the target can convert
+        * multicast tx frames to a series of unicast tx frames, to each peer
+        * within the multicast group. This num_mcast_groups configuration
+        * parameter tells the target how many multicast groups to provide
+        * storage for within its multicast group membership table.
+        */
+       __le32 num_mcast_groups;
+
+       /* Size to alloc for the mcast membership table.
+        * This num_mcast_table_elems configuration parameter tells the target
+        * how many peer elements it needs to provide storage for in its
+        * multicast group membership table. These multicast group membership
+        * table elements are shared by the multicast groups stored within
+        * the table.
+        */
+       __le32 num_mcast_table_elems;
+
+       /* Whether/how to do multicast->unicast conversion.
+        * This configuration parameter specifies whether the target should
+        * perform multicast --> unicast conversion on transmit, and if so,
+        * what to do if it finds no entries in its multicast group membership
+        * table for the multicast IP address in the tx frame.
+        * Configuration value:
+        * 0 -> Do not perform multicast to unicast conversion.
+        * 1 -> Convert multicast frames to unicast, if the IP multicast address
+        *      from the tx frame is found in the multicast group membership
+        *      table.  If the IP multicast address is not found, drop the frame
+        * 2 -> Convert multicast frames to unicast, if the IP multicast address
+        *      from the tx frame is found in the multicast group membership
+        *      table.  If the IP multicast address is not found, transmit the
+        *      frame as multicast.
+        */
+       __le32 mcast2ucast_mode;
+
+       /* How much memory to allocate for a tx PPDU dbg log.
+        * This parameter controls how much memory the target will allocate to
+        * store a log of tx PPDU meta-information (how large the PPDU was,
+        * when it was sent, whether it was successful, etc.)
+        */
+       __le32 tx_dbg_log_size;
+
+       /* How many AST entries to be allocated for WDS */
+       __le32 num_wds_entries;
+
+       /* MAC DMA burst size. 0 -default, 1 -256B */
+       __le32 dma_burst_size;
+
+       /* Fixed delimiters to be inserted after every MPDU to account for
+        * interface latency to avoid underrun.
+        */
+       __le32 mac_aggr_delim;
+
+       /* Determine whether target is responsible for detecting duplicate
+        * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+        * is always performed on the target.
+        *
+        * 0: target responsible for frag timeout and dup checking
+        * 1: host responsible for frag timeout and dup checking
+        */
+       __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+       /* Configuration for VoW : No of Video nodes to be supported and max
+        * no of descriptors for each video link (node).
+        */
+       __le32 vow_config;
+
+       /* Maximum vdev that could use gtk offload */
+       __le32 gtk_offload_max_vdev;
+
+       /* Number of msdu descriptors target should use */
+       __le32 num_msdu_desc;
+
+       /* Max number of tx fragments per MSDU.
+        * This parameter controls the max number of tx fragments per MSDU.
+        * This will be passed by the target as part of the WMI_SERVICE_READY event
+        * and is overridden by the OS shim as required.
+        */
+       __le32 max_frag_entries;
+
+       /* Max number of extended peer stats.
+        * This parameter controls the max number of peers for which extended
+        * statistics are supported by target
+        */
+       __le32 max_peer_ext_stats;
+
+       /* Smart antenna capabilities information.
+        * 1 - Smart antenna is enabled
+        * 0 - Smart antenna is disabled
+        * In future this can contain smart antenna specific capabilities.
+        */
+       __le32 smart_ant_cap;
+
+       /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+        * during init.
+        */
+       __le32 bk_minfree;
+       __le32 be_minfree;
+       __le32 vi_minfree;
+       __le32 vo_minfree;
+
+       /* Rx batch mode capability.
+        * 1 - Rx batch mode enabled
+        * 0 - Rx batch mode disabled
+        */
+       __le32 rx_batchmode;
+
+       /* Thermal throttling capability.
+        * 1 - Capable of thermal throttling
+        * 0 - Not capable of thermal throttling
+        */
+       __le32 tt_support;
+
+       /* ATF configuration.
+        * 1  - Enable ATF
+        * 0  - Disable ATF
+        */
+       __le32 atf_config;
+
+       /* Configure padding to manage IP header un-alignment
+        * 1  - Enable padding
+        * 0  - Disable padding
+        */
+       __le32 iphdr_pad_config;
+
+       /* qwrap configuration
+        * 1  - This is qwrap configuration
+        * 0  - This is not qwrap
+        */
+       __le32 qwrap_config;
+} __packed;
 
 /* structure describing host memory chunk. */
 struct host_memory_chunk {
@@ -2014,6 +2629,11 @@ struct wmi_init_cmd_10_2 {
        struct wmi_host_mem_chunks mem_chunks;
 } __packed;
 
+struct wmi_init_cmd_10_4 {
+       struct wmi_resource_config_10_4 resource_config;
+       struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
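
A minimal sketch of how a host might populate the 10.4 resource config when
building the init command. The helper name and the default limits below are
illustrative assumptions, not values taken from the driver; mem_chunks setup
is omitted.

static void example_fill_init_cmd_10_4(struct wmi_init_cmd_10_4 *cmd)
{
        struct wmi_resource_config_10_4 *cfg = &cmd->resource_config;

        cfg->num_vdevs        = __cpu_to_le32(16);   /* assumed VAP count */
        cfg->num_peers        = __cpu_to_le32(512);  /* assumed peer count */
        cfg->num_active_peers = __cpu_to_le32(256);  /* assumed active peers */
        /* 0 = no mcast->ucast conversion, per the field comment above */
        cfg->mcast2ucast_mode = __cpu_to_le32(0);
}
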
 struct wmi_chan_list_entry {
        __le16 freq;
        u8 phy_mode; /* valid for 10.2 only */
@@ -2260,15 +2880,17 @@ enum wmi_bss_filter {
 };
 
 enum wmi_scan_event_type {
-       WMI_SCAN_EVENT_STARTED         = 0x1,
-       WMI_SCAN_EVENT_COMPLETED       = 0x2,
-       WMI_SCAN_EVENT_BSS_CHANNEL     = 0x4,
-       WMI_SCAN_EVENT_FOREIGN_CHANNEL = 0x8,
-       WMI_SCAN_EVENT_DEQUEUED        = 0x10,
-       WMI_SCAN_EVENT_PREEMPTED       = 0x20, /* possibly by high-prio scan */
-       WMI_SCAN_EVENT_START_FAILED    = 0x40,
-       WMI_SCAN_EVENT_RESTARTED       = 0x80,
-       WMI_SCAN_EVENT_MAX             = 0x8000
+       WMI_SCAN_EVENT_STARTED              = BIT(0),
+       WMI_SCAN_EVENT_COMPLETED            = BIT(1),
+       WMI_SCAN_EVENT_BSS_CHANNEL          = BIT(2),
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL      = BIT(3),
+       WMI_SCAN_EVENT_DEQUEUED             = BIT(4),
+       /* possibly by high-prio scan */
+       WMI_SCAN_EVENT_PREEMPTED            = BIT(5),
+       WMI_SCAN_EVENT_START_FAILED         = BIT(6),
+       WMI_SCAN_EVENT_RESTARTED            = BIT(7),
+       WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+       WMI_SCAN_EVENT_MAX                  = BIT(15),
 };
 
 enum wmi_scan_completion_reason {
@@ -2276,6 +2898,7 @@ enum wmi_scan_completion_reason {
        WMI_SCAN_REASON_CANCELLED,
        WMI_SCAN_REASON_PREEMPTED,
        WMI_SCAN_REASON_TIMEDOUT,
+       WMI_SCAN_REASON_INTERNAL_FAILURE,
        WMI_SCAN_REASON_MAX,
 };
 
@@ -2329,6 +2952,21 @@ struct wmi_mgmt_rx_event_v2 {
        u8 buf[0];
 } __packed;
 
+struct wmi_10_4_mgmt_rx_hdr {
+       __le32 channel;
+       __le32 snr;
+       u8 rssi_ctl[4];
+       __le32 rate;
+       __le32 phy_mode;
+       __le32 buf_len;
+       __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+       struct wmi_10_4_mgmt_rx_hdr hdr;
+       u8 buf[0];
+} __packed;
+
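
The trailing buf[0] member means the received management frame immediately
follows the fixed header in the event buffer. A hedged sketch of the length
checks a handler might perform before touching the frame (the function name
and return codes are assumptions):

static int example_parse_10_4_mgmt_rx(const void *data, size_t len)
{
        const struct wmi_10_4_mgmt_rx_event *ev = data;
        u32 buf_len;

        if (len < sizeof(*ev))
                return -EPROTO;

        buf_len = __le32_to_cpu(ev->hdr.buf_len);
        if (len < sizeof(*ev) + buf_len)
                return -EPROTO;

        /* ev->buf now holds buf_len bytes of the management frame */
        return 0;
}
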
 #define WMI_RX_STATUS_OK                       0x00
 #define WMI_RX_STATUS_ERR_CRC                  0x01
 #define WMI_RX_STATUS_ERR_DECRYPT              0x08
@@ -2613,6 +3251,48 @@ struct wmi_pdev_param_map {
        u32 burst_dur;
        u32 burst_enable;
        u32 cal_period;
+       u32 aggr_burst;
+       u32 rx_decap_mode;
+       u32 smart_antenna_default_antenna;
+       u32 igmpmld_override;
+       u32 igmpmld_tid;
+       u32 antenna_gain;
+       u32 rx_filter;
+       u32 set_mcast_to_ucast_tid;
+       u32 proxy_sta_mode;
+       u32 set_mcast2ucast_mode;
+       u32 set_mcast2ucast_buffer;
+       u32 remove_mcast2ucast_buffer;
+       u32 peer_sta_ps_statechg_enable;
+       u32 igmpmld_ac_override;
+       u32 block_interbss;
+       u32 set_disable_reset_cmdid;
+       u32 set_msdu_ttl_cmdid;
+       u32 set_ppdu_duration_cmdid;
+       u32 txbf_sound_period_cmdid;
+       u32 set_promisc_mode_cmdid;
+       u32 set_burst_mode_cmdid;
+       u32 en_stats;
+       u32 mu_group_policy;
+       u32 noise_detection;
+       u32 noise_threshold;
+       u32 dpd_enable;
+       u32 set_mcast_bcast_echo;
+       u32 atf_strict_sch;
+       u32 atf_sched_duration;
+       u32 ant_plzn;
+       u32 mgmt_retry_limit;
+       u32 sensitivity_level;
+       u32 signed_txpower_2g;
+       u32 signed_txpower_5g;
+       u32 enable_per_tid_amsdu;
+       u32 enable_per_tid_ampdu;
+       u32 cca_threshold;
+       u32 rts_fixed_rate;
+       u32 pdev_reset;
+       u32 wapi_mbssid_offset;
+       u32 arp_srcaddr;
+       u32 arp_dstaddr;
 };
 
 #define WMI_PDEV_PARAM_UNSUPPORTED 0
@@ -2828,6 +3508,100 @@ enum wmi_10x_pdev_param {
        WMI_10X_PDEV_PARAM_CAL_PERIOD
 };
 
+enum wmi_10_4_pdev_param {
+       WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+       WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+       WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+       WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+       WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+       WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+       WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+       WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+       WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+       WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+       WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+       WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+       WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+       WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+       WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+       WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+       WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+       WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+       WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+       WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+       WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+       WMI_10_4_PDEV_PARAM_PMF_QOS,
+       WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_DCS,
+       WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+       WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+       WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+       WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+       WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+       WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+       WMI_10_4_PDEV_PARAM_PROXY_STA,
+       WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+       WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+       WMI_10_4_PDEV_PARAM_AGGR_BURST,
+       WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+       WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+       WMI_10_4_PDEV_PARAM_BURST_DUR,
+       WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+       WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+       WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+       WMI_10_4_PDEV_PARAM_RX_FILTER,
+       WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+       WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+       WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+       WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+       WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+       WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+       WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+       WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+       WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+       WMI_10_4_PDEV_PARAM_EN_STATS,
+       WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+       WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+       WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+       WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+       WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+       WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+       WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+       WMI_10_4_PDEV_PARAM_ANT_PLZN,
+       WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+       WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+       WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+       WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+       WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+       WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+       WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+       WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+       WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+       WMI_10_4_PDEV_PARAM_PDEV_RESET,
+       WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+       WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+       WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+};
+
 struct wmi_pdev_set_param_cmd {
        __le32 param_id;
        __le32 param_value;
@@ -3506,6 +4280,22 @@ struct wmi_vdev_param_map {
        u32 drop_unencry;
        u32 tx_encap_type;
        u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+       u32 rc_num_retries;
+       u32 cabq_maxdur;
+       u32 mfptest_set;
+       u32 rts_fixed_rate;
+       u32 vht_sgimask;
+       u32 vht80_ratemask;
+       u32 early_rx_adjust_enable;
+       u32 early_rx_tgt_bmiss_num;
+       u32 early_rx_bmiss_sample_cycle;
+       u32 early_rx_slop_step;
+       u32 early_rx_init_slop;
+       u32 early_rx_adjust_pause;
+       u32 proxy_sta;
+       u32 meru_vc;
+       u32 rx_decap_type;
+       u32 bw_nss_ratemask;
 };
 
 #define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -3764,6 +4554,75 @@ enum wmi_10x_vdev_param {
        WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
 };
 
+enum wmi_10_4_vdev_param {
+       WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+       WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+       WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+       WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+       WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+       WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+       WMI_10_4_VDEV_PARAM_SLOT_TIME,
+       WMI_10_4_VDEV_PARAM_PREAMBLE,
+       WMI_10_4_VDEV_PARAM_SWBA_TIME,
+       WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+       WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+       WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+       WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+       WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+       WMI_10_4_VDEV_PARAM_WDS,
+       WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+       WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+       WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+       WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+       WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+       WMI_10_4_VDEV_PARAM_CHWIDTH,
+       WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+       WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+       WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+       WMI_10_4_VDEV_PARAM_MGMT_RATE,
+       WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+       WMI_10_4_VDEV_PARAM_FIXED_RATE,
+       WMI_10_4_VDEV_PARAM_SGI,
+       WMI_10_4_VDEV_PARAM_LDPC,
+       WMI_10_4_VDEV_PARAM_TX_STBC,
+       WMI_10_4_VDEV_PARAM_RX_STBC,
+       WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+       WMI_10_4_VDEV_PARAM_DEF_KEYID,
+       WMI_10_4_VDEV_PARAM_NSS,
+       WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+       WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+       WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+       WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+       WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+       WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+       WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+       WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+       WMI_10_4_VDEV_PARAM_TXBF,
+       WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+       WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+       WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+       WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+       WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+       WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+       WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+       WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+       WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+       WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+       WMI_10_4_VDEV_PARAM_PROXY_STA,
+       WMI_10_4_VDEV_PARAM_MERU_VC,
+       WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+       WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+};
+
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
@@ -4305,6 +5164,14 @@ struct wmi_tim_info {
        __le32 tim_num_ps_pending;
 } __packed;
 
+struct wmi_tim_info_arg {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       const __le32 *tim_bitmap;
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
+
 /* Maximum number of NOA Descriptors supported */
 #define WMI_P2P_MAX_NOA_DESCRIPTORS 4
 #define WMI_P2P_OPPPS_ENABLE_BIT       BIT(0)
@@ -4336,6 +5203,47 @@ struct wmi_host_swba_event {
        struct wmi_bcn_info bcn_info[0];
 } __packed;
 
+/* 16 words cover 512 clients, plus 1 guard word */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+       __le32 tim_len;
+       __le32 tim_mcast;
+       __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+       __le32 tim_changed;
+       __le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+       /* Bit 0 - Flag to indicate an update in NOA schedule
+        * Bits 7-1 - Reserved
+        */
+       u8 changed;
+       /* NOA index */
+       u8 index;
+       /* Bit 0 - Opp PS state of the AP
+        * Bits 1-7 - Ctwindow in TUs
+        */
+       u8 ctwindow_oppps;
+       /* Number of NOA descriptors */
+       u8 num_descriptors;
+
+       struct wmi_p2p_noa_descriptor
+               noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+       struct wmi_10_4_tim_info tim_info;
+       struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+       __le32 vdev_map;
+       struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
 #define WMI_MAX_AP_VDEV 16
 
 struct wmi_tbtt_offset_event {
@@ -4660,6 +5568,18 @@ struct wmi_chan_info_event {
        __le32 cycle_count;
 } __packed;
 
+struct wmi_10_4_chan_info_event {
+       __le32 err_code;
+       __le32 freq;
+       __le32 cmd_flags;
+       __le32 noise_floor;
+       __le32 rx_clear_count;
+       __le32 cycle_count;
+       __le32 chan_tx_pwr_range;
+       __le32 chan_tx_pwr_tp;
+       __le32 rx_frame_count;
+} __packed;
+
 struct wmi_peer_sta_kickout_event {
        struct wmi_mac_addr peer_macaddr;
 } __packed;
@@ -4840,6 +5760,9 @@ struct wmi_ch_info_ev_arg {
        __le32 noise_floor;
        __le32 rx_clear_count;
        __le32 cycle_count;
+       __le32 chan_tx_pwr_range;
+       __le32 chan_tx_pwr_tp;
+       __le32 rx_frame_count;
 };
 
 struct wmi_vdev_start_ev_arg {
@@ -4855,7 +5778,7 @@ struct wmi_peer_kick_ev_arg {
 
 struct wmi_swba_ev_arg {
        __le32 vdev_map;
-       const struct wmi_tim_info *tim_info[WMI_MAX_AP_VDEV];
+       struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
        const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
 };
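
Because the 10.4 TIM bitmap is wider than the legacy one, the common SWBA
argument now carries a length and a pointer into the event buffer instead of
a fixed-size struct. A hedged sketch of filling one entry from a 10.4 TIM
report (the helper name is an assumption):

static void example_fill_tim_arg(const struct wmi_10_4_tim_info *src,
                                 struct wmi_tim_info_arg *dst)
{
        dst->tim_len            = src->tim_len;
        dst->tim_mcast          = src->tim_mcast;
        dst->tim_bitmap         = src->tim_bitmap; /* points into the event */
        dst->tim_changed        = src->tim_changed;
        dst->tim_num_ps_pending = src->tim_num_ps_pending;
}
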
 
index fc595b92ac56007a024bc9cb5d871e19cef4ba9c..c5f8bc4b5595ecda04fcb7b130f68180c6d74791 100644 (file)
 #define AR_PHY_MODE              (AR_SM_BASE + 0x8)
 #define AR_PHY_ACTIVE            (AR_SM_BASE + 0xc)
 #define AR_PHY_SPUR_MASK_A       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + 0x24)
+#define AR_PHY_SPUR_MASK_B       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
 #define AR_PHY_SPECTRAL_SCAN     (AR_SM_BASE + 0x28)
 #define AR_PHY_RADAR_BW_FILTER   (AR_SM_BASE + 0x2c)
 #define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A                       0x3FF
 #define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S                     0
 
-#define AR_PHY_TEST              (AR_SM_BASE + 0x160)
+#define AR_PHY_TEST              (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
 
 #define AR_PHY_TEST_BBB_OBS_SEL       0x780000
 #define AR_PHY_TEST_BBB_OBS_SEL_S     19
 #define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S          29
 
 
-#define AR_PHY_TSTDAC            (AR_SM_BASE + 0x168)
+#define AR_PHY_TSTDAC            (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
 
-#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + 0x16c)
+#define AR_PHY_CHAN_STATUS       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
 
 #define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ    0x00000008
 #define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S  3
 
-#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + 0x174)
-#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + 0x178)
-#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + 0x17c)
-#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + 0x180)
-#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + 0x190)
-#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + 0x194)
+#define AR_PHY_CHNINFO_NOISEPWR  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM   (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0  (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED    (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL       (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
 
 #define AR_PHY_HEAVYCLIP_CTL     (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
 #define AR_PHY_HEAVYCLIP_20      (AR_SM_BASE + 0x1a8)
 #define AR_PHY_HEAVYCLIP_40      (AR_SM_BASE + 0x1ac)
+#define AR_PHY_HEAVYCLIP_1      (AR_SM_BASE + 0x19c)
+#define AR_PHY_HEAVYCLIP_2      (AR_SM_BASE + 0x1a0)
+#define AR_PHY_HEAVYCLIP_3      (AR_SM_BASE + 0x1a4)
+#define AR_PHY_HEAVYCLIP_4      (AR_SM_BASE + 0x1a8)
+#define AR_PHY_HEAVYCLIP_5      (AR_SM_BASE + 0x1ac)
 #define AR_PHY_ILLEGAL_TXRATE    (AR_SM_BASE + 0x1b0)
 
 #define AR_PHY_POWER_TX_RATE(_d) (AR_SM_BASE + 0x1c0 + ((_d) << 2))
index dbf8f495964217e1b5799fb165155ff3c88b4894..da32c8faad94f1b58f2157c046e5db1bdb04f6f3 100644 (file)
@@ -765,6 +765,8 @@ static int read_file_reset(struct seq_file *file, void *data)
                [RESET_TYPE_BEACON_STUCK] = "Stuck Beacon",
                [RESET_TYPE_MCI] = "MCI Reset",
                [RESET_TYPE_CALIBRATION] = "Calibration error",
+               [RESET_TX_DMA_ERROR] = "Tx DMA stop error",
+               [RESET_RX_DMA_ERROR] = "Rx DMA stop error",
        };
        int i;
 
index a8e9319958e6eca7a645610c24c6b46adc142f3b..cd68c5f0e751a57ed6341e56d710234f92b0f5db 100644 (file)
@@ -50,6 +50,8 @@ enum ath_reset_type {
        RESET_TYPE_BEACON_STUCK,
        RESET_TYPE_MCI,
        RESET_TYPE_CALIBRATION,
+       RESET_TX_DMA_ERROR,
+       RESET_RX_DMA_ERROR,
        __RESET_TYPE_MAX
 };
 
index e98a9eaba7ff3f1b84a85945e63e901c8a216207..1ece42c2443d79e3d73e999b0ff5a6edb8b08fc7 100644 (file)
@@ -30,6 +30,157 @@ struct ath_radar_data {
        u8 pulse_length_pri;
 };
 
+/**** begin: CHIRP ************************************************************/
+
+/* min and max gradients for defined FCC chirping pulses, given by
+ * - 20MHz chirp width over a pulse width of  50us
+ * -  5MHz chirp width over a pulse width of 100us
+ */
+static const int BIN_DELTA_MIN         = 1;
+static const int BIN_DELTA_MAX         = 10;
+
+/* we need at least 3 deltas / 4 samples for a reliable chirp detection */
+#define NUM_DIFFS 3
+static const int FFT_NUM_SAMPLES       = (NUM_DIFFS + 1);
+
+/* Threshold for difference of delta peaks */
+static const int MAX_DIFF              = 2;
+
+/* width range to be checked for chirping */
+static const int MIN_CHIRP_PULSE_WIDTH = 20;
+static const int MAX_CHIRP_PULSE_WIDTH = 110;
+
+struct ath9k_dfs_fft_20 {
+       u8 bin[28];
+       u8 lower_bins[3];
+} __packed;
+struct ath9k_dfs_fft_40 {
+       u8 bin[64];
+       u8 lower_bins[3];
+       u8 upper_bins[3];
+} __packed;
+
+static inline int fft_max_index(u8 *bins)
+{
+       return (bins[2] & 0xfc) >> 2;
+}
+static inline int fft_max_magnitude(u8 *bins)
+{
+       return (bins[0] & 0xc0) >> 6 | bins[1] << 2 | (bins[2] & 0x03) << 10;
+}
+static inline u8 fft_bitmap_weight(u8 *bins)
+{
+       return bins[0] & 0x3f;
+}
+
+static int ath9k_get_max_index_ht40(struct ath9k_dfs_fft_40 *fft,
+                                   bool is_ctl, bool is_ext)
+{
+       const int DFS_UPPER_BIN_OFFSET = 64;
+       /* if detected radar on both channels, select the significant one */
+       if (is_ctl && is_ext) {
+               /* first check whether channels have 'strong' bins */
+               is_ctl = fft_bitmap_weight(fft->lower_bins) != 0;
+               is_ext = fft_bitmap_weight(fft->upper_bins) != 0;
+
+               /* if still unclear, take higher magnitude */
+               if (is_ctl && is_ext) {
+                       int mag_lower = fft_max_magnitude(fft->lower_bins);
+                       int mag_upper = fft_max_magnitude(fft->upper_bins);
+                       if (mag_upper > mag_lower)
+                               is_ctl = false;
+                       else
+                               is_ext = false;
+               }
+       }
+       if (is_ctl)
+               return fft_max_index(fft->lower_bins);
+       return fft_max_index(fft->upper_bins) + DFS_UPPER_BIN_OFFSET;
+}
+static bool ath9k_check_chirping(struct ath_softc *sc, u8 *data,
+                                int datalen, bool is_ctl, bool is_ext)
+{
+       int i;
+       int max_bin[FFT_NUM_SAMPLES];
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       int prev_delta;
+
+       if (IS_CHAN_HT40(ah->curchan)) {
+               struct ath9k_dfs_fft_40 *fft = (struct ath9k_dfs_fft_40 *) data;
+               int num_fft_packets = datalen / sizeof(*fft);
+               if (num_fft_packets == 0)
+                       return false;
+
+               ath_dbg(common, DFS, "HT40: datalen=%d, num_fft_packets=%d\n",
+                       datalen, num_fft_packets);
+               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+                       ath_dbg(common, DFS, "not enough packets for chirp\n");
+                       return false;
+               }
+               /* HW sometimes adds 2 garbage bytes in front of FFT samples */
+               if ((datalen % sizeof(*fft)) == 2) {
+                       fft = (struct ath9k_dfs_fft_40 *) (data + 2);
+                       ath_dbg(common, DFS, "fixing datalen by 2\n");
+               }
+               if (IS_CHAN_HT40MINUS(ah->curchan)) {
+                       int temp = is_ctl;
+                       is_ctl = is_ext;
+                       is_ext = temp;
+               }
+               for (i = 0; i < FFT_NUM_SAMPLES; i++)
+                       max_bin[i] = ath9k_get_max_index_ht40(fft + i, is_ctl,
+                                                             is_ext);
+       } else {
+               struct ath9k_dfs_fft_20 *fft = (struct ath9k_dfs_fft_20 *) data;
+               int num_fft_packets = datalen / sizeof(*fft);
+               if (num_fft_packets == 0)
+                       return false;
+               ath_dbg(common, DFS, "HT20: datalen=%d, num_fft_packets=%d\n",
+                       datalen, num_fft_packets);
+               if (num_fft_packets < (FFT_NUM_SAMPLES)) {
+                       ath_dbg(common, DFS, "not enough packets for chirp\n");
+                       return false;
+               }
+               /* in ht20, this is a 6-bit signed number => shift it to 0 */
+               for (i = 0; i < FFT_NUM_SAMPLES; i++)
+                       max_bin[i] = fft_max_index(fft[i].lower_bins) ^ 0x20;
+       }
+       ath_dbg(common, DFS, "bin_max = [%d, %d, %d, %d]\n",
+               max_bin[0], max_bin[1], max_bin[2], max_bin[3]);
+
+       /* Check for chirp attributes within specs
+        * a) delta of adjacent max_bins is within range
+        * b) delta of adjacent deltas are within tolerance
+        */
+       prev_delta = 0;
+       for (i = 0; i < NUM_DIFFS; i++) {
+               int ddelta = -1;
+               int delta = max_bin[i + 1] - max_bin[i];
+
+               /* ensure gradient is within valid range */
+               if (abs(delta) < BIN_DELTA_MIN || abs(delta) > BIN_DELTA_MAX) {
+                       ath_dbg(common, DFS, "CHIRP: invalid delta %d "
+                               "in sample %d\n", delta, i);
+                       return false;
+               }
+               if (i == 0)
+                       goto done;
+               ddelta = delta - prev_delta;
+               if (abs(ddelta) > MAX_DIFF) {
+                       ath_dbg(common, DFS, "CHIRP: ddelta %d too high\n",
+                               ddelta);
+                       return false;
+               }
+done:
+               ath_dbg(common, DFS, "CHIRP - %d: delta=%d, ddelta=%d\n",
+                       i, delta, ddelta);
+               prev_delta = delta;
+       }
+       return true;
+}
+/**** end: CHIRP **************************************************************/
+
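
A short worked example of the delta/ddelta check above (sample values are
arbitrary):

/* max_bin = {10, 13, 17, 20}
 *   deltas  = {3, 4, 3}  -> all within [BIN_DELTA_MIN, BIN_DELTA_MAX]
 *   ddeltas = {1, -1}    -> |ddelta| <= MAX_DIFF
 * => ath9k_check_chirping() reports a chirping pulse.
 */
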
 /* convert pulse duration to usecs, considering clock mode */
 static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
 {
@@ -113,12 +264,6 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
                return false;
        }
 
-       /*
-        * TODO: check chirping pulses
-        *       checks for chirping are dependent on the DFS regulatory domain
-        *       used, which is yet TBD
-        */
-
        /* convert duration to usecs */
        pe->width = dur_to_usecs(sc->sc_ah, dur);
        pe->rssi = rssi;
@@ -190,6 +335,16 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        if (!ath9k_postprocess_radar_event(sc, &ard, &pe))
                return;
 
+       if (pe.width > MIN_CHIRP_PULSE_WIDTH &&
+           pe.width < MAX_CHIRP_PULSE_WIDTH) {
+               bool is_ctl = !!(ard.pulse_bw_info & PRI_CH_RADAR_FOUND);
+               bool is_ext = !!(ard.pulse_bw_info & EXT_CH_RADAR_FOUND);
+               int clen = datalen - 3;
+               pe.chirp = ath9k_check_chirping(sc, data, clen, is_ctl, is_ext);
+       } else {
+               pe.chirp = false;
+       }
+
        ath_dbg(common, DFS,
                "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, "
                "width=%d, rssi=%d, delta_ts=%llu\n",
@@ -198,7 +353,8 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
        sc->dfs_prev_pulse_ts = pe.ts;
        if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND)
                ath9k_dfs_process_radar_pulse(sc, &pe);
-       if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
+       if (IS_CHAN_HT40(ah->curchan) &&
+           ard.pulse_bw_info & EXT_CH_RADAR_FOUND) {
                pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20;
                ath9k_dfs_process_radar_pulse(sc, &pe);
        }
index 6c75fb1ab77d45ba8b6dab67a7fe6bb3b1f3dc98..d3189daf99965e6059d1ce407c99600a822ed69f 100644 (file)
@@ -491,10 +491,9 @@ bool ath_stoprecv(struct ath_softc *sc)
 
        if (!(ah->ah_flags & AH_UNPLUGGED) &&
            unlikely(!stopped)) {
-               ath_err(ath9k_hw_common(sc->sc_ah),
-                       "Could not stop RX, we could be "
-                       "confusing the DMA engine when we start RX up\n");
-               ATH_DBG_WARN_ON_ONCE(!stopped);
+               ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+                       "Failed to stop Rx DMA\n");
+               RESET_STAT_INC(sc, RESET_RX_DMA_ERROR);
        }
        return stopped && !reset;
 }
index 3ad79bb4f2c21c94b6c41c526a7e033e0937ed77..b766a7fc60aaa051a519ed18830444322e98d4b3 100644 (file)
@@ -1883,8 +1883,11 @@ bool ath_drain_all_txq(struct ath_softc *sc)
                        npend |= BIT(i);
        }
 
-       if (npend)
-               ath_err(common, "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+       if (npend) {
+               RESET_STAT_INC(sc, RESET_TX_DMA_ERROR);
+               ath_dbg(common, RESET,
+                       "Failed to stop TX DMA, queues=0x%03x!\n", npend);
+       }
 
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                if (!ATH_TXQ_SETUP(sc, i))
@@ -2470,8 +2473,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        bf = list_first_entry(&bf_q, struct ath_buf, list);
        hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
 
-       if (hdr->frame_control & IEEE80211_FCTL_MOREDATA) {
-               hdr->frame_control &= ~IEEE80211_FCTL_MOREDATA;
+       if (hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_MOREDATA)) {
+               hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
                dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
                        sizeof(*hdr), DMA_TO_DEVICE);
        }
index 1b5ad1965607cd287ca211d2232da1cdcc8d73fc..cc5c592fc4c007cb7fd0fc8c6c8d05427b274123 100644 (file)
@@ -273,7 +273,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
                                tmp_false_count++;
                        }
                }
-               if (ps.count < min_count)
+               if (ps.count <= min_count)
                        /* did not reach minimum count, drop sequence */
                        continue;
 
index c79cfe02ec80a62ded454689cee393261484d5ec..e4be2d9bbac40415c8ffc660e674b5a1aba2920c 100644 (file)
@@ -736,6 +736,92 @@ static int wil_fix_bcon(struct wil6210_priv *wil,
        return rc;
 }
 
+/* internal functions for device reset and starting AP */
+static int _wil_cfg80211_set_ies(struct wiphy *wiphy,
+                                size_t probe_ies_len, const u8 *probe_ies,
+                                size_t assoc_ies_len, const u8 *assoc_ies)
+
+{
+       int rc;
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       /* FW does not form a regular beacon, so bcon IEs are not set.
+        * For the DMG bcon, when it is supported, bcon IEs will
+        * be reused; add something like:
+        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
+        * bcon->beacon_ies);
+        */
+       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, probe_ies_len, probe_ies);
+       if (rc) {
+               wil_err(wil, "set_ie(PROBE_RESP) failed\n");
+               return rc;
+       }
+
+       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, assoc_ies_len, assoc_ies);
+       if (rc) {
+               wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
+                                 struct net_device *ndev,
+                                 const u8 *ssid, size_t ssid_len, u32 privacy,
+                                 int bi, u8 chan,
+                                 size_t probe_ies_len, const u8 *probe_ies,
+                                 size_t assoc_ies_len, const u8 *assoc_ies,
+                                 u8 hidden_ssid)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int rc;
+       struct wireless_dev *wdev = ndev->ieee80211_ptr;
+       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+
+       wil_set_recovery_state(wil, fw_recovery_idle);
+
+       mutex_lock(&wil->mutex);
+
+       __wil_down(wil);
+       rc = __wil_up(wil);
+       if (rc)
+               goto out;
+
+       rc = wmi_set_ssid(wil, ssid_len, ssid);
+       if (rc)
+               goto out;
+
+       rc = _wil_cfg80211_set_ies(wiphy, probe_ies_len, probe_ies,
+                                  assoc_ies_len, assoc_ies);
+       if (rc)
+               goto out;
+
+       wil->privacy = privacy;
+       wil->channel = chan;
+       wil->hidden_ssid = hidden_ssid;
+
+       netif_carrier_on(ndev);
+
+       rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+       if (rc)
+               goto err_pcp_start;
+
+       rc = wil_bcast_init(wil);
+       if (rc)
+               goto err_bcast;
+
+       goto out; /* success */
+
+err_bcast:
+       wmi_pcp_stop(wil);
+err_pcp_start:
+       netif_carrier_off(ndev);
+out:
+       mutex_unlock(&wil->mutex);
+       return rc;
+}
+
 static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                                      struct net_device *ndev,
                                      struct cfg80211_beacon_data *bcon)
@@ -746,6 +832,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
        const u8 *pr_ies = NULL;
        size_t pr_ies_len = 0;
        int rc;
+       u32 privacy = 0;
 
        wil_dbg_misc(wil, "%s()\n", __func__);
        wil_print_bcon_data(bcon);
@@ -760,40 +847,41 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
                wil_print_bcon_data(bcon);
        }
 
-       /* FW do not form regular beacon, so bcon IE's are not set
-        * For the DMG bcon, when it will be supported, bcon IE's will
-        * be reused; add something like:
-        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-        * bcon->beacon_ies);
-        */
-       rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(PROBE_RESP) failed\n");
-               return rc;
-       }
+       if (pr_ies && cfg80211_find_ie(WLAN_EID_RSN, pr_ies, pr_ies_len))
+               privacy = 1;
 
-       rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP,
-                       bcon->assocresp_ies_len,
-                       bcon->assocresp_ies);
-       if (rc) {
-               wil_err(wil, "set_ie(ASSOC_RESP) failed\n");
-               return rc;
+       /* in case privacy has changed, need to restart the AP */
+       if (wil->privacy != privacy) {
+               struct wireless_dev *wdev = ndev->ieee80211_ptr;
+
+               wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
+                            wil->privacy, privacy);
+
+               rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
+                                           wdev->ssid_len, privacy,
+                                           wdev->beacon_interval,
+                                           wil->channel, pr_ies_len, pr_ies,
+                                           bcon->assocresp_ies_len,
+                                           bcon->assocresp_ies,
+                                           wil->hidden_ssid);
+       } else {
+               rc = _wil_cfg80211_set_ies(wiphy, pr_ies_len, pr_ies,
+                                          bcon->assocresp_ies_len,
+                                          bcon->assocresp_ies);
        }
 
-       return 0;
+       return rc;
 }
 
 static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                                 struct net_device *ndev,
                                 struct cfg80211_ap_settings *info)
 {
-       int rc = 0;
+       int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
-       struct wireless_dev *wdev = ndev->ieee80211_ptr;
        struct ieee80211_channel *channel = info->chandef.chan;
        struct cfg80211_beacon_data *bcon = &info->beacon;
        struct cfg80211_crypto_settings *crypto = &info->crypto;
-       u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
        struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp;
        size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
        const u8 *pr_ies = NULL;
@@ -807,6 +895,23 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
+       switch (info->hidden_ssid) {
+       case NL80211_HIDDEN_SSID_NOT_IN_USE:
+               hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
+               break;
+
+       case NL80211_HIDDEN_SSID_ZERO_LEN:
+               hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
+               break;
+
+       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+               hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
+               break;
+
+       default:
+               wil_err(wil, "AP: Invalid hidden SSID %d\n", info->hidden_ssid);
+               return -EOPNOTSUPP;
+       }
        wil_dbg_misc(wil, "AP on Channel %d %d MHz, %s\n", channel->hw_value,
                     channel->center_freq, info->privacy ? "secure" : "open");
        wil_dbg_misc(wil, "Privacy: %d auth_type %d\n",
@@ -830,70 +935,14 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                wil_print_bcon_data(bcon);
        }
 
-       wil_set_recovery_state(wil, fw_recovery_idle);
-
-       mutex_lock(&wil->mutex);
-
-       __wil_down(wil);
-       rc = __wil_up(wil);
-       if (rc)
-               goto out;
-
-       rc = wmi_set_ssid(wil, info->ssid_len, info->ssid);
-       if (rc)
-               goto out;
-
-       /* IE's */
-       /* bcon 'head IE's are not relevant for 60g band */
-       /*
-        * FW do not form regular beacon, so bcon IE's are not set
-        * For the DMG bcon, when it will be supported, bcon IE's will
-        * be reused; add something like:
-        * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
-        * bcon->beacon_ies);
-        */
-       wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, pr_ies_len, pr_ies);
-       wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
-                  bcon->assocresp_ies);
-
-       wil->privacy = info->privacy;
-
-       switch (info->hidden_ssid) {
-       case NL80211_HIDDEN_SSID_NOT_IN_USE:
-               hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
-               break;
-
-       case NL80211_HIDDEN_SSID_ZERO_LEN:
-               hidden_ssid = WMI_HIDDEN_SSID_SEND_EMPTY;
-               break;
-
-       case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
-               hidden_ssid = WMI_HIDDEN_SSID_CLEAR;
-               break;
-
-       default:
-               rc = -EOPNOTSUPP;
-               goto out;
-       }
-
-       netif_carrier_on(ndev);
-
-       rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
-                          channel->hw_value, hidden_ssid);
-       if (rc)
-               goto err_pcp_start;
+       rc = _wil_cfg80211_start_ap(wiphy, ndev,
+                                   info->ssid, info->ssid_len, info->privacy,
+                                   info->beacon_interval, channel->hw_value,
+                                   pr_ies_len, pr_ies,
+                                   bcon->assocresp_ies_len,
+                                   bcon->assocresp_ies,
+                                   hidden_ssid);
 
-       rc = wil_bcast_init(wil);
-       if (rc)
-               goto err_bcast;
-
-       goto out; /* success */
-err_bcast:
-       wmi_pcp_stop(wil);
-err_pcp_start:
-       netif_carrier_off(ndev);
-out:
-       mutex_unlock(&wil->mutex);
        return rc;
 }
 
index 275355d46a36fc8c2ae85585fc7188c0a7e64473..c63e4a35eaa0fc95cc0cf9d68847c8a11b0b6749 100644 (file)
@@ -559,6 +559,8 @@ struct wil6210_priv {
        /* profile */
        u32 monitor_flags;
        u32 privacy; /* secure connection? */
+       u8 hidden_ssid; /* relevant in AP mode */
+       u16 channel; /* relevant in AP mode */
        int sinfo_gen;
        u32 ap_isolate; /* no intra-BSS communication */
        /* interrupt moderation */
index 916123a3d74e71053a908491f64eec4a0d978a76..a335f94c72ff7019c3017afadc7946ec26283cc3 100644 (file)
@@ -929,8 +929,8 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
        b43_lo_write(dev, &cal->ctl);
 }
 
-/* Periodic LO maintanance work */
-void b43_lo_g_maintanance_work(struct b43_wldev *dev)
+/* Periodic LO maintenance work */
+void b43_lo_g_maintenance_work(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
        struct b43_phy_g *gphy = phy->g;
index 3b27e20eff80ff1d295d50e879888ed938eda913..7b4df3883bc24ed4f98ef7e8ca755c2304d3a1ad 100644 (file)
@@ -80,7 +80,7 @@ void b43_lo_g_adjust_to(struct b43_wldev *dev,
 
 void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all);
 
-void b43_lo_g_maintanance_work(struct b43_wldev *dev);
+void b43_lo_g_maintenance_work(struct b43_wldev *dev);
 void b43_lo_g_cleanup(struct b43_wldev *dev);
 void b43_lo_g_init(struct b43_wldev *dev);
 
index 727ce6edb4b3831faf6d254ad789b4a9a1a7c336..462310e6e88fbe85716fef77410f2302f3de64b9 100644 (file)
@@ -3004,7 +3004,7 @@ static void b43_gphy_op_pwork_15sec(struct b43_wldev *dev)
                   phy->rev == 1) {
                //TODO: implement rev1 workaround
        }
-       b43_lo_g_maintanance_work(dev);
+       b43_lo_g_maintenance_work(dev);
        b43_mac_enable(dev);
 }
 
index d86d1f1f1c91d70ea803cfd07737ecaab6699400..ffe526070d6f19a99de1a777129a51692e508147 100644 (file)
@@ -5785,6 +5785,7 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy)
 
 static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
+       struct brcmf_pub *drvr = ifp->drvr;
        struct ieee80211_supported_band *band;
        __le32 bandlist[3];
        u32 n_bands;
@@ -5798,6 +5799,19 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
        if (err)
                return err;
 
+       for (i = 0; i < wiphy->iface_combinations->max_interfaces &&
+            i < ARRAY_SIZE(drvr->addresses); i++) {
+               u8 *addr = drvr->addresses[i].addr;
+
+               memcpy(addr, drvr->mac, ETH_ALEN);
+               if (i) {
+                       addr[0] |= BIT(1);
+                       addr[ETH_ALEN - 1] ^= i;
+               }
+       }
+       wiphy->addresses = drvr->addresses;
+       wiphy->n_addresses = i;
+
        wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
        wiphy->cipher_suites = __wl_cipher_suites;
        wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
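
The loop above derives the additional interface addresses from the primary
MAC by setting the locally-administered bit and XOR-ing the interface index
into the last octet. A worked example, assuming a base MAC of
00:11:22:33:44:55:

/* i = 0 -> 00:11:22:33:44:55  (primary address, left unchanged)
 * i = 1 -> 02:11:22:33:44:54  (addr[0] |= BIT(1), last octet ^ 1)
 * i = 2 -> 02:11:22:33:44:57  (last octet ^ 2)
 */
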
index fd74a9c6e9ac62ac310c688f5fade1acbf7c7d73..746304121cdbf99bf02845462689737c21272c02 100644 (file)
@@ -21,6 +21,7 @@
 #ifndef BRCMFMAC_CORE_H
 #define BRCMFMAC_CORE_H
 
+#include <net/cfg80211.h>
 #include "fweh.h"
 
 #define TOE_TX_CSUM_OL         0x00000001
@@ -118,6 +119,8 @@ struct brcmf_pub {
        /* Multicast data packets sent to dongle */
        unsigned long tx_multicast;
 
+       struct mac_address addresses[BRCMF_MAX_IFS];
+
        struct brcmf_if *iflist[BRCMF_MAX_IFS];
 
        struct mutex proto_block;
index ab775a5d5b331f975c6b4dadb300a0744556d9b1..d2c5747e3ac9233731d70d8ff391fc8791beaebb 100644 (file)
@@ -1472,9 +1472,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
        wl->timers = t;
 
 #ifdef DEBUG
-       t->name = kmalloc(strlen(name) + 1, GFP_ATOMIC);
-       if (t->name)
-               strcpy(t->name, name);
+       t->name = kstrdup(name, GFP_ATOMIC);
 #endif
 
        return t;
index 7603546d2de322cf8fb5dd5bf0d5ff3e48d8a6be..29185aeccba8b721d3d9c18177f5f86b10d145a2 100644 (file)
@@ -467,7 +467,6 @@ static struct spi_driver spi_driver = {
        .remove         = cw1200_spi_disconnect,
        .driver = {
                .name           = "cw1200_wlan_spi",
-               .bus            = &spi_bus_type,
                .owner          = THIS_MODULE,
 #ifdef CONFIG_PM
                .pm             = &cw1200_pm_ops,
index 08eb229e7816010f11e702d679cb178b213362d2..36818c7f30b962d549867c354521cc72cbb4d26d 100644 (file)
@@ -1410,7 +1410,7 @@ static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
 static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
 {
 
-#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
 
        struct host_command cmd = {
                .host_command = CARD_DISABLE_PHY_OFF,
index 7f4cb692cc57e97de9b6bd78ea717df97d03221f..af1b3e6839fa6db3e69e90bee7a50923f1d0230e 100644 (file)
@@ -3259,7 +3259,7 @@ il3945_show_measurement(struct device *d, struct device_attribute *attr,
 
        while (size && PAGE_SIZE - len) {
                hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
-                                  PAGE_SIZE - len, 1);
+                                  PAGE_SIZE - len, true);
                len = strlen(buf);
                if (PAGE_SIZE - len)
                        buf[len++] = '\n';
index 34401015319631bb21afda0f48f283f13f6a2343..908b9f4fef6f3b4e3522b8f2c5a03166ca0cf2b8 100644 (file)
@@ -515,12 +515,8 @@ il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
            scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
                      eeprom_ver);
        for (ofs = 0; ofs < eeprom_len; ofs += 16) {
-               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
-               hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
-                                  buf_size - pos, 0);
-               pos += strlen(buf + pos);
-               if (buf_size - pos > 0)
-                       buf[pos++] = '\n';
+               pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+                                ofs, ptr + ofs);
        }
 
        ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
index b15e4c7acbecd2ed44b1e76360464ef23f3fd9f3..ff63cb5632eb089a817422dddfa9dda0069b2478 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "cfg80211.h"
 #include "main.h"
+#include "11n.h"
 
 static char *reg_alpha2;
 module_param(reg_alpha2, charp, 0);
@@ -34,12 +35,38 @@ static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        },
 };
 
-static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta = {
        .limits = mwifiex_ap_sta_limits,
        .num_different_channels = 1,
        .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
        .max_interfaces = MWIFIEX_MAX_BSS_NUM,
        .beacon_int_infra_match = true,
+       .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                               BIT(NL80211_CHAN_WIDTH_20) |
+                               BIT(NL80211_CHAN_WIDTH_40),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_vht = {
+       .limits = mwifiex_ap_sta_limits,
+       .num_different_channels = 1,
+       .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+       .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+       .beacon_int_infra_match = true,
+       .radar_detect_widths =  BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+                               BIT(NL80211_CHAN_WIDTH_20) |
+                               BIT(NL80211_CHAN_WIDTH_40) |
+                               BIT(NL80211_CHAN_WIDTH_80),
+};
+
+static const struct ieee80211_iface_combination
+mwifiex_iface_comb_ap_sta_drcs = {
+       .limits = mwifiex_ap_sta_limits,
+       .num_different_channels = 2,
+       .n_limits = ARRAY_SIZE(mwifiex_ap_sta_limits),
+       .max_interfaces = MWIFIEX_MAX_BSS_NUM,
+       .beacon_int_infra_match = true,
 };
 
 /*
@@ -441,7 +468,7 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
  *      - Country codes
  *      - Sub bands (first channel, number of channels, maximum Tx power)
  */
-static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
 {
        u8 no_of_triplet = 0;
        struct ieee80211_country_ie_triplet *t;
@@ -804,10 +831,13 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
                priv->bss_type = MWIFIEX_BSS_TYPE_STA;
                break;
        case NL80211_IFTYPE_P2P_CLIENT:
-       case NL80211_IFTYPE_P2P_GO:
                priv->bss_role =  MWIFIEX_BSS_ROLE_STA;
                priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
                break;
+       case NL80211_IFTYPE_P2P_GO:
+               priv->bss_role =  MWIFIEX_BSS_ROLE_UAP;
+               priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
+               break;
        case NL80211_IFTYPE_AP:
                priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
                priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
@@ -1115,8 +1145,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
        case NL80211_IFTYPE_P2P_GO:
                switch (type) {
                case NL80211_IFTYPE_STATION:
-                       if (mwifiex_cfg80211_init_p2p_client(priv))
+                       if (mwifiex_cfg80211_deinit_p2p(priv))
                                return -EFAULT;
+                       priv->adapter->curr_iface_comb.p2p_intf--;
+                       priv->adapter->curr_iface_comb.sta_intf++;
                        dev->ieee80211_ptr->iftype = type;
                        break;
                case NL80211_IFTYPE_ADHOC:
@@ -2788,6 +2820,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
        struct mwifiex_adapter *adapter = priv->adapter;
+       struct sk_buff *skb, *tmp;
 
 #ifdef CONFIG_DEBUG_FS
        mwifiex_dev_debugfs_remove(priv);
@@ -2795,6 +2828,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
 
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
+       skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+               mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+
        if (netif_carrier_ok(priv->netdev))
                netif_carrier_off(priv->netdev);
 
@@ -2954,7 +2990,6 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                                        MWIFIEX_MEF_MAX_BYTESEQ)) {
                        mwifiex_dbg(priv->adapter, ERROR,
                                    "Pattern not supported\n");
-                       kfree(mef_entry);
                        return -EOPNOTSUPP;
                }
 
@@ -3036,9 +3071,12 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
 
        mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]);
 
-       if (wowlan->n_patterns || wowlan->magic_pkt)
+       if (wowlan->n_patterns || wowlan->magic_pkt) {
                ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg,
                                                   &mef_entry[1], wowlan);
+               if (ret)
+                       goto err;
+       }
 
        if (!mef_cfg.criteria)
                mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
@@ -3048,6 +3086,8 @@ static int mwifiex_set_mef_filter(struct mwifiex_private *priv,
        ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG,
                        HostCmd_ACT_GEN_SET, 0,
                        &mef_cfg, true);
+
+err:
        kfree(mef_entry);
        return ret;
 }
@@ -3359,6 +3399,72 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        return mwifiex_tdls_oper(priv, peer, action);
 }
 
+static int
+mwifiex_cfg80211_tdls_chan_switch(struct wiphy *wiphy, struct net_device *dev,
+                                 const u8 *addr, u8 oper_class,
+                                 struct cfg80211_chan_def *chandef)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+       u16 chan;
+       u8 second_chan_offset, band;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       sta_ptr = mwifiex_get_sta_entry(priv, addr);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+       if (!sta_ptr) {
+               wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+                         __func__, addr);
+               return -ENOENT;
+       }
+
+       if (!(sta_ptr->tdls_cap.extcap.ext_capab[3] &
+             WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH)) {
+               wiphy_err(wiphy, "%pM does not support tdls cs\n", addr);
+               return -ENOENT;
+       }
+
+       if (sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+           sta_ptr->tdls_status == TDLS_IN_OFF_CHAN) {
+               wiphy_err(wiphy, "channel switch is running, abort request\n");
+               return -EALREADY;
+       }
+
+       chan = chandef->chan->hw_value;
+       second_chan_offset = mwifiex_get_sec_chan_offset(chan);
+       band = chandef->chan->band;
+       mwifiex_start_tdls_cs(priv, addr, chan, second_chan_offset, band);
+
+       return 0;
+}
+
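+/* cfg80211 operation handler to cancel a TDLS channel switch.
+ *
+ * Function validates the peer and its channel switch state before
+ * asking firmware to stop the switch.
+ */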
+static void
+mwifiex_cfg80211_tdls_cancel_chan_switch(struct wiphy *wiphy,
+                                        struct net_device *dev,
+                                        const u8 *addr)
+{
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+       sta_ptr = mwifiex_get_sta_entry(priv, addr);
+       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+       if (!sta_ptr) {
+               wiphy_err(wiphy, "%s: Invalid TDLS peer %pM\n",
+                         __func__, addr);
+       } else if (!(sta_ptr->tdls_status == TDLS_CHAN_SWITCHING ||
+                    sta_ptr->tdls_status == TDLS_IN_BASE_CHAN ||
+                    sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)) {
+               wiphy_err(wiphy, "tdls chan switch not initiated by %pM\n",
+                         addr);
+       } else {
+               mwifiex_stop_tdls_cs(priv, addr);
+       }
+}
+
 static int
 mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
                             const u8 *mac, struct station_parameters *params)
@@ -3575,6 +3681,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .set_coalesce = mwifiex_cfg80211_set_coalesce,
        .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
        .tdls_oper = mwifiex_cfg80211_tdls_oper,
+       .tdls_channel_switch = mwifiex_cfg80211_tdls_chan_switch,
+       .tdls_cancel_channel_switch = mwifiex_cfg80211_tdls_cancel_chan_switch,
        .add_station = mwifiex_cfg80211_add_station,
        .change_station = mwifiex_cfg80211_change_station,
        .get_channel = mwifiex_cfg80211_get_channel,
@@ -3672,7 +3780,12 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        else
                wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
 
-       wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
+       if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
+       else if (adapter->is_hw_11ac_capable)
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_vht;
+       else
+               wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta;
        wiphy->n_iface_combinations = 1;
 
        /* Initialize cipher suits */
@@ -3709,6 +3822,9 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                           NL80211_FEATURE_INACTIVITY_TIMER |
                           NL80211_FEATURE_NEED_OBSS_SCAN;
 
+       if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+               wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+
        if (adapter->fw_api_ver == MWIFIEX_FW_V15)
                wiphy->features |= NL80211_FEATURE_SK_TX_STATUS;
 
index 51e344789ba214bbbd5cfe7dc29a0a6ac1244527..098e1f14dc9a47efc4aae6e9611f1165c2af5624 100644 (file)
@@ -141,6 +141,9 @@ enum mwifiex_tdls_status {
        TDLS_SETUP_COMPLETE,
        TDLS_SETUP_FAILURE,
        TDLS_LINK_TEARDOWN,
+       TDLS_CHAN_SWITCHING,
+       TDLS_IN_BASE_CHAN,
+       TDLS_IN_OFF_CHAN,
 };
 
 enum mwifiex_tdls_error_code {
index cd09051710e6cee82c624e6960f4c8accd452338..cff38ad129aad93fa8f63c7a8636afc80b51eb96 100644 (file)
@@ -169,14 +169,17 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_UAP_PS_AO_TIMER    (PROPRIETARY_TLV_BASE_ID + 123)
 #define TLV_TYPE_PWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 145)
 #define TLV_TYPE_GWK_CIPHER         (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
 #define TLV_TYPE_SCAN_CHANNEL_GAP   (PROPRIETARY_TLV_BASE_ID + 197)
 #define TLV_TYPE_API_REV            (PROPRIETARY_TLV_BASE_ID + 199)
 #define TLV_TYPE_CHANNEL_STATS      (PROPRIETARY_TLV_BASE_ID + 198)
 #define TLV_BTCOEX_WL_AGGR_WINSIZE  (PROPRIETARY_TLV_BASE_ID + 202)
 #define TLV_BTCOEX_WL_SCANTIME      (PROPRIETARY_TLV_BASE_ID + 203)
+#define TLV_TYPE_BSS_MODE           (PROPRIETARY_TLV_BASE_ID + 206)
 
 #define MWIFIEX_TX_DATA_BUF_SIZE_2K        2048
 
@@ -200,6 +203,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 
 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
+#define ISSUPP_DRCS_ENABLED(FwCapInfo) (FwCapInfo & BIT(15))
 #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16))
 
 #define MWIFIEX_DEF_HT_CAP     (IEEE80211_HT_CAP_DSSSCCK40 | \
@@ -359,6 +363,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
+#define HostCmd_CMD_TDLS_CONFIG                       0x0100
+#define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
 #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG               0x0223
 
@@ -509,8 +515,10 @@ enum P2P_MODES {
 #define EVENT_TDLS_GENERIC_EVENT        0x00000052
 #define EVENT_RADAR_DETECTED           0x00000053
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
+#define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
+#define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT         0x00000074
 #define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076
 
@@ -545,7 +553,27 @@ enum P2P_MODES {
 #define ACT_TDLS_DELETE            0x00
 #define ACT_TDLS_CREATE            0x01
 #define ACT_TDLS_CONFIG            0x02
-#define TDLS_EVENT_LINK_TEAR_DOWN  3
+
+#define TDLS_EVENT_LINK_TEAR_DOWN      3
+#define TDLS_EVENT_CHAN_SWITCH_RESULT  7
+#define TDLS_EVENT_START_CHAN_SWITCH   8
+#define TDLS_EVENT_CHAN_SWITCH_STOPPED 9
+
+#define TDLS_BASE_CHANNEL             0
+#define TDLS_OFF_CHANNEL              1
+
+#define ACT_TDLS_CS_ENABLE_CONFIG 0x00
+#define ACT_TDLS_CS_INIT         0x06
+#define ACT_TDLS_CS_STOP         0x07
+#define ACT_TDLS_CS_PARAMS       0x08
+
+#define MWIFIEX_DEF_CS_UNIT_TIME       2
+#define MWIFIEX_DEF_CS_THR_OTHERLINK   10
+#define MWIFIEX_DEF_THR_DIRECTLINK     0
+#define MWIFIEX_DEF_CS_TIME            10
+#define MWIFIEX_DEF_CS_TIMEOUT         16
+#define MWIFIEX_DEF_CS_REG_CLASS       12
+#define MWIFIEX_DEF_CS_PERIODICITY     1
 
 #define MWIFIEX_FW_V15            15
 
@@ -1131,6 +1159,13 @@ struct host_cmd_ds_tx_rate_query {
        u8 ht_info;
 } __packed;
 
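+/* TX pause TLV carried in EVENT_TX_DATA_PAUSE: peer address, pause flag
+ * and the packet count reported by firmware for that peer.
+ */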
+struct mwifiex_tx_pause_tlv {
+       struct mwifiex_ie_types_header header;
+       u8 peermac[ETH_ALEN];
+       u8 tx_pause;
+       u8 pkt_cnt;
+} __packed;
+
 enum Host_Sleep_Action {
        HS_CONFIGURE = 0x0001,
        HS_ACTIVATE  = 0x0002,
@@ -1249,6 +1284,36 @@ struct host_cmd_ds_tdls_oper {
        u8 peer_mac[ETH_ALEN];
 } __packed;
 
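+/* Payload layouts used with HostCmd_CMD_TDLS_CONFIG: enable/disable,
+ * channel switch parameters and start/stop channel switch requests.
+ */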
+struct mwifiex_tdls_config {
+       __le16 enable;
+};
+
+struct mwifiex_tdls_config_cs_params {
+       u8 unit_time;
+       u8 thr_otherlink;
+       u8 thr_directlink;
+};
+
+struct mwifiex_tdls_init_cs_params {
+       u8 peer_mac[ETH_ALEN];
+       u8 primary_chan;
+       u8 second_chan_offset;
+       u8 band;
+       __le16 switch_time;
+       __le16 switch_timeout;
+       u8 reg_class;
+       u8 periodicity;
+} __packed;
+
+struct mwifiex_tdls_stop_cs_params {
+       u8 peer_mac[ETH_ALEN];
+};
+
+struct host_cmd_ds_tdls_config {
+       __le16 tdls_action;
+       u8 tdls_data[1];
+} __packed;
+
 struct mwifiex_chan_desc {
        __le16 start_freq;
        u8 chan_width;
@@ -1370,6 +1435,11 @@ struct host_cmd_ds_802_11_scan_ext {
        u8    tlv_buffer[1];
 } __packed;
 
+struct mwifiex_ie_types_bss_mode {
+       struct mwifiex_ie_types_header  header;
+       u8 bss_mode;
+} __packed;
+
 struct mwifiex_ie_types_bss_scan_rsp {
        struct mwifiex_ie_types_header header;
        u8 bssid[ETH_ALEN];
@@ -1908,6 +1978,12 @@ struct mwifiex_radar_det_event {
        __le32 passed;
 } __packed;
 
+struct mwifiex_ie_types_multi_chan_info {
+       struct mwifiex_ie_types_header header;
+       __le16 status;
+       u8 tlv_buffer[0];
+} __packed;
+
 struct meas_rpt_map {
        u8 rssi:3;
        u8 unmeasured:1;
@@ -1927,10 +2003,18 @@ struct host_cmd_ds_802_11_subsc_evt {
        __le16 events;
 } __packed;
 
+struct chan_switch_result {
+       u8 cur_chan;
+       u8 status;
+       u8 reason;
+} __packed;
+
 struct mwifiex_tdls_generic_event {
        __le16 type;
        u8 peer_mac[ETH_ALEN];
        union {
+               struct chan_switch_result switch_result;
+               u8 cs_stop_reason;
                __le16 reason_code;
                __le16 reserved;
        } u;
@@ -1971,6 +2055,11 @@ struct host_cmd_ds_coalesce_cfg {
        struct coalesce_receive_filt_rule rule[0];
 } __packed;
 
+struct host_cmd_ds_multi_chan_policy {
+       __le16 action;
+       __le16 policy;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2035,9 +2124,11 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_sta_list sta_list;
                struct host_cmd_11ac_vht_cfg vht_cfg;
                struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+               struct host_cmd_ds_tdls_config tdls_config;
                struct host_cmd_ds_tdls_oper tdls_oper;
                struct host_cmd_ds_chan_rpt_req chan_rpt_req;
                struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
+               struct host_cmd_ds_multi_chan_policy mc_policy;
        } params;
 } __packed;
 
index 0ba8945094139c5e02374a11034e66777186c074..abf52d25b9815b720399221e2da00bff9381a2c9 100644 (file)
@@ -409,6 +409,8 @@ int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
        int ret;
 
        ret = mwifiex_uap_parse_tail_ies(priv, info);
+
+       if (ret)
                return ret;
 
        return mwifiex_set_mgmt_beacon_data_ies(priv, info);
@@ -477,6 +479,7 @@ int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
                                                   ar_ie, &priv->assocresp_idx);
 
 done:
+       kfree(gen_ie);
        kfree(beacon_ie);
        kfree(pr_ie);
        kfree(ar_ie);
index df7fdc09d38c7f1e326b59ad8fd0a9ea53485f74..8fa363add9706364843207a85d475096f20dbeb7 100644 (file)
@@ -77,7 +77,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
 
        priv->media_connected = false;
        eth_broadcast_addr(priv->curr_addr);
-
+       priv->port_open = false;
        priv->pkt_tx_ctrl = 0;
        priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
        priv->data_rate = 0;    /* Initially indicate the rate as auto */
@@ -499,6 +499,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
                INIT_LIST_HEAD(&priv->sta_list);
                INIT_LIST_HEAD(&priv->auto_tdls_list);
                skb_queue_head_init(&priv->tdls_txq);
+               skb_queue_head_init(&priv->bypass_txq);
 
                spin_lock_init(&priv->tx_ba_stream_tbl_lock);
                spin_lock_init(&priv->rx_reorder_tbl_lock);
index 56b024a6aaa58d2b89ca25692c37163367eda597..3cda1f956f0b1654ec438291e22f416ada279e17 100644 (file)
@@ -783,6 +783,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
 
        if (priv->sec_info.wpa_enabled || priv->sec_info.wpa2_enabled)
                priv->scan_block = true;
+       else
+               priv->port_open = true;
 
 done:
        /* Need to indicate IOCTL complete */
index 3ba4e0e04223bcde4fd0da162db6bb6db6160b62..278dc94eaecbb28e12a075526572c342e52f31cf 100644 (file)
@@ -276,6 +276,7 @@ process_start:
                     !adapter->pm_wakeup_fw_try) &&
                    (is_command_pending(adapter) ||
                     !skb_queue_empty(&adapter->tx_data_q) ||
+                    !mwifiex_bypass_txlist_empty(adapter) ||
                     !mwifiex_wmm_lists_empty(adapter))) {
                        adapter->pm_wakeup_fw_try = true;
                        mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3));
@@ -299,9 +300,16 @@ process_start:
 
                        if ((!adapter->scan_chan_gap_enabled &&
                             adapter->scan_processing) || adapter->data_sent ||
+                            mwifiex_is_tdls_chan_switching
+                            (mwifiex_get_priv(adapter,
+                                              MWIFIEX_BSS_ROLE_STA)) ||
                            (mwifiex_wmm_lists_empty(adapter) &&
+                            mwifiex_bypass_txlist_empty(adapter) &&
                             skb_queue_empty(&adapter->tx_data_q))) {
                                if (adapter->cmd_sent || adapter->curr_cmd ||
+                                       !mwifiex_is_send_cmd_allowed
+                                               (mwifiex_get_priv(adapter,
+                                               MWIFIEX_BSS_ROLE_STA)) ||
                                    (!is_command_pending(adapter)))
                                        break;
                        }
@@ -342,7 +350,9 @@ process_start:
                        continue;
                }
 
-               if (!adapter->cmd_sent && !adapter->curr_cmd) {
+               if (!adapter->cmd_sent && !adapter->curr_cmd &&
+                   mwifiex_is_send_cmd_allowed
+                   (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
                        if (mwifiex_exec_next_cmd(adapter) == -1) {
                                ret = -1;
                                break;
@@ -365,7 +375,25 @@ process_start:
 
                if ((adapter->scan_chan_gap_enabled ||
                     !adapter->scan_processing) &&
-                   !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
+                   !adapter->data_sent &&
+                   !mwifiex_bypass_txlist_empty(adapter) &&
+                   !mwifiex_is_tdls_chan_switching
+                       (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+                       mwifiex_process_bypass_tx(adapter);
+                       if (adapter->hs_activated) {
+                               adapter->is_hs_configured = false;
+                               mwifiex_hs_activated_event
+                                       (mwifiex_get_priv
+                                        (adapter, MWIFIEX_BSS_ROLE_ANY),
+                                        false);
+                       }
+               }
+
+               if ((adapter->scan_chan_gap_enabled ||
+                    !adapter->scan_processing) &&
+                   !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
+                   !mwifiex_is_tdls_chan_switching
+                       (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
                        mwifiex_wmm_process_tx(adapter);
                        if (adapter->hs_activated) {
                                adapter->is_hs_configured = false;
@@ -379,6 +407,7 @@ process_start:
                if (adapter->delay_null_pkt && !adapter->cmd_sent &&
                    !adapter->curr_cmd && !is_command_pending(adapter) &&
                    (mwifiex_wmm_lists_empty(adapter) &&
+                    mwifiex_bypass_txlist_empty(adapter) &&
                     skb_queue_empty(&adapter->tx_data_q))) {
                        if (!mwifiex_send_null_packet
                            (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
@@ -649,6 +678,26 @@ mwifiex_close(struct net_device *dev)
        return 0;
 }
 
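+/* This function returns true for frames that must bypass the WMM queues:
+ * EAPOL frames, management frames and TDLS setup frames on a TDLS-capable
+ * station interface.
+ */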
+static bool
+mwifiex_bypass_tx_queue(struct mwifiex_private *priv,
+                       struct sk_buff *skb)
+{
+       struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+
+       if (ntohs(eth_hdr->h_proto) == ETH_P_PAE ||
+           mwifiex_is_skb_mgmt_frame(skb) ||
+           (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+            ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+            (ntohs(eth_hdr->h_proto) == ETH_P_TDLS))) {
+               mwifiex_dbg(priv->adapter, DATA,
+                           "bypass txqueue; eth type %#x, mgmt %d\n",
+                            ntohs(eth_hdr->h_proto),
+                            mwifiex_is_skb_mgmt_frame(skb));
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * Add buffer into wmm tx queue and queue work to transmit it.
  */
@@ -666,8 +715,14 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
                }
        }
 
-       atomic_inc(&priv->adapter->tx_pending);
-       mwifiex_wmm_add_buf_txqueue(priv, skb);
+       if (mwifiex_bypass_tx_queue(priv, skb)) {
+               atomic_inc(&priv->adapter->tx_pending);
+               atomic_inc(&priv->adapter->bypass_tx_pending);
+               mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
+       } else {
+               atomic_inc(&priv->adapter->tx_pending);
+               mwifiex_wmm_add_buf_txqueue(priv, skb);
+       }
 
        mwifiex_queue_main_work(priv->adapter);
 
index ae98b5b83b1f0133cc216c2942095081ded5b52d..face7478937f6559e106c4932d6551569b8beaac 100644 (file)
@@ -281,6 +281,7 @@ struct mwifiex_ra_list_tbl {
        u8 amsdu_in_ampdu;
        u16 total_pkt_count;
        bool tdls_link;
+       bool tx_paused;
 };
 
 struct mwifiex_tid_tbl {
@@ -294,6 +295,7 @@ struct mwifiex_tid_tbl {
 struct mwifiex_wmm_desc {
        struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
        u32 packets_out[MAX_NUM_TID];
+       u32 pkts_paused[MAX_NUM_TID];
        /* spin lock to protect ra_list */
        spinlock_t ra_list_spinlock;
        struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
@@ -517,6 +519,7 @@ struct mwifiex_private {
        u8 frame_type;
        u8 curr_addr[ETH_ALEN];
        u8 media_connected;
+       u8 port_open;
        u32 num_tx_timeout;
        /* track consecutive timeout */
        u8 tx_timeout_cnt;
@@ -662,6 +665,7 @@ struct mwifiex_private {
        struct cfg80211_beacon_data beacon_after;
        struct mwifiex_11h_intf_state state_11h;
        struct mwifiex_ds_mem_rw mem_rw;
+       struct sk_buff_head bypass_txq;
 };
 
 
@@ -768,6 +772,7 @@ struct mwifiex_sta_node {
        u8 tdls_status;
        struct mwifiex_tdls_capab tdls_cap;
        struct mwifiex_station_stats stats;
+       u8 tx_pause;
 };
 
 struct mwifiex_auto_tdls_peer {
@@ -831,6 +836,7 @@ struct mwifiex_adapter {
        wait_queue_head_t init_wait_q;
        void *card;
        struct mwifiex_if_ops if_ops;
+       atomic_t bypass_tx_pending;
        atomic_t rx_pending;
        atomic_t tx_pending;
        atomic_t cmd_pending;
@@ -979,6 +985,7 @@ struct mwifiex_adapter {
        u8 coex_win_size;
        u8 coex_tx_win_size;
        u8 coex_rx_win_size;
+       bool drcs_enabled;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1330,6 +1337,21 @@ static inline u8 mwifiex_is_any_intf_active(struct mwifiex_private *priv)
        return 0;
 }
 
+static inline u8 mwifiex_is_tdls_link_setup(u8 status)
+{
+       switch (status) {
+       case TDLS_SETUP_COMPLETE:
+       case TDLS_CHAN_SWITCHING:
+       case TDLS_IN_BASE_CHAN:
+       case TDLS_IN_OFF_CHAN:
+               return true;
+       default:
+               break;
+       }
+
+       return false;
+}
+
 int mwifiex_init_shutdown_fw(struct mwifiex_private *priv,
                             u32 func_init_shutdown);
 int mwifiex_add_card(void *, struct semaphore *, struct mwifiex_if_ops *, u8);
@@ -1458,6 +1480,9 @@ struct mwifiex_sta_node *
 mwifiex_add_sta_entry(struct mwifiex_private *priv, const u8 *mac);
 struct mwifiex_sta_node *
 mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac);
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv);
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv);
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv);
 int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
                                 u8 action_code, u8 dialog_token,
                                 u16 status_code, const u8 *extra_ies,
@@ -1488,6 +1513,13 @@ void mwifiex_check_auto_tdls(unsigned long context);
 void mwifiex_add_auto_tdls_peer(struct mwifiex_private *priv, const u8 *mac);
 void mwifiex_setup_auto_tdls_timer(struct mwifiex_private *priv);
 void mwifiex_clean_auto_tdls(struct mwifiex_private *priv);
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv);
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv);
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac);
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+                         u8 primary_chan, u8 second_chan_offset, u8 band);
+
 int mwifiex_cmd_issue_chan_report_request(struct mwifiex_private *priv,
                                          struct host_cmd_ds_command *cmd,
                                          void *data_buf);
@@ -1522,6 +1554,12 @@ void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
+int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+                                   struct sk_buff *event);
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+                                     struct sk_buff *event_skb);
+
 #ifdef CONFIG_DEBUG_FS
 void mwifiex_debugfs_init(void);
 void mwifiex_debugfs_remove(void);
index baf9715ddc1034bc58e6ec4cea367caa670b571c..ef8da8ebcbab4ee5af196c4ec854b178c3b81322 100644 (file)
@@ -823,6 +823,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        int i;
        u8 ssid_filter;
        struct mwifiex_ie_types_htcap *ht_cap;
+       struct mwifiex_ie_types_bss_mode *bss_mode;
 
        /* The tlv_buf_len is calculated for each scan command.  The TLVs added
           in this routine will be preserved since the routine that sends the
@@ -908,6 +909,10 @@ mwifiex_config_scan(struct mwifiex_private *priv,
                                wildcard_ssid_tlv->max_ssid_length =
                                                        IEEE80211_MAX_SSID_LEN;
 
+                       if (!memcmp(user_scan_in->ssid_list[i].ssid,
+                                   "DIRECT-", 7))
+                               wildcard_ssid_tlv->max_ssid_length = 0xfe;
+
                        memcpy(wildcard_ssid_tlv->ssid,
                               user_scan_in->ssid_list[i].ssid, ssid_len);
 
@@ -968,6 +973,15 @@ mwifiex_config_scan(struct mwifiex_private *priv,
        else
                *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
 
+       if (adapter->ext_scan) {
+               bss_mode = (struct mwifiex_ie_types_bss_mode *)tlv_pos;
+               bss_mode->header.type = cpu_to_le16(TLV_TYPE_BSS_MODE);
+               bss_mode->header.len = cpu_to_le16(sizeof(bss_mode->bss_mode));
+               bss_mode->bss_mode = scan_cfg_out->bss_mode;
+               tlv_pos += sizeof(bss_mode->header) +
+                          le16_to_cpu(bss_mode->header.len);
+       }
+
        /* If the input config or adapter has the number of Probes set,
           add tlv */
        if (num_probes) {
index 037adcd1f484abeb5f54478893ab7b1ff2b7ab76..a49a80dd773edbf02fe99d433d9eddee48349c76 100644 (file)
 #include "11n.h"
 #include "11ac.h"
 
+static bool drcs;
+module_param(drcs, bool, 0644);
+MODULE_PARM_DESC(drcs, "multi-channel operation:1, single-channel operation:0");
+
 static bool disable_auto_ds;
 module_param(disable_auto_ds, bool, 0);
 MODULE_PARM_DESC(disable_auto_ds,
@@ -1511,6 +1515,22 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
        return 0;
 }
 
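+/* This function prepares the multi channel policy (MC_POLICY) command
+ * with the DRCS setting passed in data_buf.
+ */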
+static int
+mwifiex_cmd_set_mc_policy(struct mwifiex_private *priv,
+                         struct host_cmd_ds_command *cmd,
+                         u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_ds_multi_chan_policy *mc_pol = &cmd->params.mc_policy;
+       const u16 *drcs_info = data_buf;
+
+       mc_pol->action = cpu_to_le16(cmd_action);
+       mc_pol->policy = cpu_to_le16(*drcs_info);
+       cmd->command = cpu_to_le16(HostCmd_CMD_MC_POLICY);
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_multi_chan_policy) +
+                               S_DS_GEN);
+       return 0;
+}
+
 static int
 mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
                         struct host_cmd_ds_command *cmd,
@@ -1575,6 +1595,50 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
        return 0;
 }
 
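+/* This function prepares the TDLS config command: the action-specific
+ * parameter block (enable, channel switch init/stop or CS params) is
+ * copied behind the action field and the command size adjusted.
+ */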
+static int
+mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
+                       struct host_cmd_ds_command *cmd,
+                       u16 cmd_action, void *data_buf)
+{
+       struct host_cmd_ds_tdls_config *tdls_config = &cmd->params.tdls_config;
+       struct mwifiex_tdls_init_cs_params *config;
+       struct mwifiex_tdls_config *init_config;
+       u16 len;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_CONFIG);
+       cmd->size = cpu_to_le16(S_DS_GEN);
+       tdls_config->tdls_action = cpu_to_le16(cmd_action);
+       le16_add_cpu(&cmd->size, sizeof(tdls_config->tdls_action));
+
+       switch (cmd_action) {
+       case ACT_TDLS_CS_ENABLE_CONFIG:
+               init_config = data_buf;
+               len = sizeof(*init_config);
+               memcpy(tdls_config->tdls_data, init_config, len);
+               break;
+       case ACT_TDLS_CS_INIT:
+               config = data_buf;
+               len = sizeof(*config);
+               memcpy(tdls_config->tdls_data, config, len);
+               break;
+       case ACT_TDLS_CS_STOP:
+               len = sizeof(struct mwifiex_tdls_stop_cs_params);
+               memcpy(tdls_config->tdls_data, data_buf, len);
+               break;
+       case ACT_TDLS_CS_PARAMS:
+               len = sizeof(struct mwifiex_tdls_config_cs_params);
+               memcpy(tdls_config->tdls_data, data_buf, len);
+               break;
+       default:
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "Unknown TDLS configuration\n");
+               return -ENOTSUPP;
+       }
+
+       le16_add_cpu(&cmd->size, len);
+       return 0;
+}
+
 static int
 mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                      struct host_cmd_ds_command *cmd,
@@ -1933,10 +1997,12 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_ADHOC;
-               else if (priv->bss_mode == NL80211_IFTYPE_STATION)
+               else if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+                        priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT)
                        cmd_ptr->params.bss_mode.con_type =
                                CONNECTION_TYPE_INFRA;
-               else if (priv->bss_mode == NL80211_IFTYPE_AP)
+               else if (priv->bss_mode == NL80211_IFTYPE_AP ||
+                        priv->bss_mode == NL80211_IFTYPE_P2P_GO)
                        cmd_ptr->params.bss_mode.con_type = CONNECTION_TYPE_AP;
                cmd_ptr->size = cpu_to_le16(sizeof(struct
                                host_cmd_ds_set_bss_mode) + S_DS_GEN);
@@ -1958,6 +2024,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_TDLS_OPER:
                ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_TDLS_CONFIG:
+               ret = mwifiex_cmd_tdls_config(priv, cmd_ptr, cmd_action,
+                                             data_buf);
+               break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr,
                                                            data_buf);
@@ -1966,6 +2036,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
                                                   data_buf);
                break;
+       case HostCmd_CMD_MC_POLICY:
+               ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+                                               data_buf);
+               break;
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2082,6 +2156,18 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
                        if (ret)
                                return -1;
                }
+
+               if (drcs) {
+                       adapter->drcs_enabled = true;
+                       if (ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
+                               ret = mwifiex_send_cmd(priv,
+                                                      HostCmd_CMD_MC_POLICY,
+                                                      HostCmd_ACT_GEN_SET, 0,
+                                                      &adapter->drcs_enabled,
+                                                      true);
+                       if (ret)
+                               return -1;
+               }
        }
 
        /* get tx rate */
index b645884b3b97a2c1491f745e436d217dec3b5f4d..89e8dafb473876a23d48819f5550b4a24fa0bb1d 100644 (file)
@@ -599,6 +599,7 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
                                    "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
+                       priv->port_open = true;
                }
        }
 
@@ -629,6 +630,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
                        mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
                        priv->wpa_is_gtk_set = true;
                        priv->scan_block = false;
+                       priv->port_open = true;
                }
        }
 
@@ -1191,12 +1193,15 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_TDLS_OPER:
                ret = mwifiex_ret_tdls_oper(priv, resp);
+       case HostCmd_CMD_MC_POLICY:
                break;
        case HostCmd_CMD_CHAN_REPORT_REQUEST:
                break;
        case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
                ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
                break;
+       case HostCmd_CMD_TDLS_CONFIG:
+               break;
        default:
                mwifiex_dbg(adapter, ERROR,
                            "CMD_RESP: unknown cmd response %#x\n",
index 848de2621958cfaacbcd500ae7317fddf6d92085..3d18c585e5436769061a3de767e17c22674206c7 100644 (file)
@@ -54,6 +54,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        priv->media_connected = false;
 
        priv->scan_block = false;
+       priv->port_open = false;
 
        if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
            ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info)) {
@@ -153,6 +154,7 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
        struct mwifiex_sta_node *sta_ptr;
        struct mwifiex_tdls_generic_event *tdls_evt =
                        (void *)event_skb->data + sizeof(adapter->event_cause);
+       u8 *mac = tdls_evt->peer_mac;
 
        /* reserved 2 bytes are not mandatory in tdls event */
        if (event_skb->len < (sizeof(struct mwifiex_tdls_generic_event) -
@@ -175,6 +177,59 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
                                           le16_to_cpu(tdls_evt->u.reason_code),
                                           GFP_KERNEL);
                break;
+       case TDLS_EVENT_CHAN_SWITCH_RESULT:
+               mwifiex_dbg(adapter, EVENT, "tdls channel switch result:\n");
+               mwifiex_dbg(adapter, EVENT,
+                           "status=0x%x, reason=0x%x, cur_chan=%d\n",
+                           tdls_evt->u.switch_result.status,
+                           tdls_evt->u.switch_result.reason,
+                           tdls_evt->u.switch_result.cur_chan);
+
+               /* tdls channel switch failed */
+               if (tdls_evt->u.switch_result.status != 0) {
+                       switch (tdls_evt->u.switch_result.cur_chan) {
+                       case TDLS_BASE_CHANNEL:
+                               sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+                               break;
+                       case TDLS_OFF_CHANNEL:
+                               sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+                               break;
+                       default:
+                               break;
+                       }
+                       return ret;
+               }
+
+               /* tdls channel switch success */
+               switch (tdls_evt->u.switch_result.cur_chan) {
+               case TDLS_BASE_CHANNEL:
+                       if (sta_ptr->tdls_status == TDLS_IN_BASE_CHAN)
+                               break;
+                       mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+                                                                 false);
+                       sta_ptr->tdls_status = TDLS_IN_BASE_CHAN;
+                       break;
+               case TDLS_OFF_CHANNEL:
+                       if (sta_ptr->tdls_status == TDLS_IN_OFF_CHAN)
+                               break;
+                       mwifiex_update_ralist_tx_pause_in_tdls_cs(priv, mac,
+                                                                 true);
+                       sta_ptr->tdls_status = TDLS_IN_OFF_CHAN;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       case TDLS_EVENT_START_CHAN_SWITCH:
+               mwifiex_dbg(adapter, EVENT, "tdls start channel switch...\n");
+               sta_ptr->tdls_status = TDLS_CHAN_SWITCHING;
+               break;
+       case TDLS_EVENT_CHAN_SWITCH_STOPPED:
+               mwifiex_dbg(adapter, EVENT,
+                           "tdls chan switch stopped, reason=%d\n",
+                           tdls_evt->u.cs_stop_reason);
+               break;
        default:
                break;
        }
@@ -182,6 +237,145 @@ static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
        return ret;
 }
 
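+/* This function handles a TX pause TLV on the uAP interface: it pauses or
+ * resumes the whole port, a multicast RA list or the RA list of a single
+ * associated station.
+ */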
+static void mwifiex_process_uap_tx_pause(struct mwifiex_private *priv,
+                                        struct mwifiex_ie_types_header *tlv)
+{
+       struct mwifiex_tx_pause_tlv *tp;
+       struct mwifiex_sta_node *sta_ptr;
+       unsigned long flags;
+
+       tp = (void *)tlv;
+       mwifiex_dbg(priv->adapter, EVENT,
+                   "uap tx_pause: %pM pause=%d, pkts=%d\n",
+                   tp->peermac, tp->tx_pause,
+                   tp->pkt_cnt);
+
+       if (ether_addr_equal(tp->peermac, priv->netdev->dev_addr)) {
+               if (tp->tx_pause)
+                       priv->port_open = false;
+               else
+                       priv->port_open = true;
+       } else if (is_multicast_ether_addr(tp->peermac)) {
+               mwifiex_update_ralist_tx_pause(priv, tp->peermac, tp->tx_pause);
+       } else {
+               spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+               sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+               spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+               if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+                       sta_ptr->tx_pause = tp->tx_pause;
+                       mwifiex_update_ralist_tx_pause(priv, tp->peermac,
+                                                      tp->tx_pause);
+               }
+       }
+}
+
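+/* This function handles a TX pause TLV on the station interface: it pauses
+ * or resumes the port when the TLV refers to our AP, or the RA list of a
+ * TDLS peer whose link is set up.
+ */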
+static void mwifiex_process_sta_tx_pause(struct mwifiex_private *priv,
+                                        struct mwifiex_ie_types_header *tlv)
+{
+       struct mwifiex_tx_pause_tlv *tp;
+       struct mwifiex_sta_node *sta_ptr;
+       int status;
+       unsigned long flags;
+
+       tp = (void *)tlv;
+       mwifiex_dbg(priv->adapter, EVENT,
+                   "sta tx_pause: %pM pause=%d, pkts=%d\n",
+                   tp->peermac, tp->tx_pause,
+                   tp->pkt_cnt);
+
+       if (ether_addr_equal(tp->peermac, priv->cfg_bssid)) {
+               if (tp->tx_pause)
+                       priv->port_open = false;
+               else
+                       priv->port_open = true;
+       } else {
+               if (!ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+                       return;
+
+               status = mwifiex_get_tdls_link_status(priv, tp->peermac);
+               if (mwifiex_is_tdls_link_setup(status)) {
+                       spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+                       sta_ptr = mwifiex_get_sta_entry(priv, tp->peermac);
+                       spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+
+                       if (sta_ptr && sta_ptr->tx_pause != tp->tx_pause) {
+                               sta_ptr->tx_pause = tp->tx_pause;
+                               mwifiex_update_ralist_tx_pause(priv,
+                                                              tp->peermac,
+                                                              tp->tx_pause);
+                       }
+               }
+       }
+}
+
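+/* This function parses EVENT_MULTI_CHAN_INFO and logs whether multi-channel
+ * operation has started or ended.
+ */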
+void mwifiex_process_multi_chan_event(struct mwifiex_private *priv,
+                                     struct sk_buff *event_skb)
+{
+       struct mwifiex_ie_types_multi_chan_info *chan_info;
+       u16 status;
+
+       chan_info = (void *)event_skb->data + sizeof(u32);
+
+       if (le16_to_cpu(chan_info->header.type) != TLV_TYPE_MULTI_CHAN_INFO) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "unknown TLV in chan_info event\n");
+               return;
+       }
+
+       status = le16_to_cpu(chan_info->status);
+
+       if (status) {
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "multi-channel operation started\n");
+       } else {
+               mwifiex_dbg(priv->adapter, EVENT,
+                           "multi-channel operation over\n");
+       }
+}
+
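+/* This function walks the TLVs of EVENT_TX_DATA_PAUSE and dispatches TX
+ * pause TLVs to the station or uAP handler depending on the BSS role.
+ */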
+void mwifiex_process_tx_pause_event(struct mwifiex_private *priv,
+                                   struct sk_buff *event_skb)
+{
+       struct mwifiex_ie_types_header *tlv;
+       u16 tlv_type, tlv_len;
+       int tlv_buf_left;
+
+       if (!priv->media_connected) {
+               mwifiex_dbg(priv->adapter, ERROR,
+                           "tx_pause event while disconnected; bss_role=%d\n",
+                           priv->bss_role);
+               return;
+       }
+
+       tlv_buf_left = event_skb->len - sizeof(u32);
+       tlv = (void *)event_skb->data + sizeof(u32);
+
+       while (tlv_buf_left >= (int)sizeof(struct mwifiex_ie_types_header)) {
+               tlv_type = le16_to_cpu(tlv->type);
+               tlv_len  = le16_to_cpu(tlv->len);
+               if ((sizeof(struct mwifiex_ie_types_header) + tlv_len) >
+                   tlv_buf_left) {
+                       mwifiex_dbg(priv->adapter, ERROR,
+                                   "wrong tlv: tlvLen=%d, tlvBufLeft=%d\n",
+                                   tlv_len, tlv_buf_left);
+                       break;
+               }
+               if (tlv_type == TLV_TYPE_TX_PAUSE) {
+                       if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
+                               mwifiex_process_sta_tx_pause(priv, tlv);
+                       else
+                               mwifiex_process_uap_tx_pause(priv, tlv);
+               }
+
+               tlv_buf_left -= sizeof(struct mwifiex_ie_types_header) +
+                               tlv_len;
+               tlv = (void *)((u8 *)tlv + tlv_len +
+                              sizeof(struct mwifiex_ie_types_header));
+       }
+}
+
 /*
 * This function handles coex events generated by firmware
 */
@@ -359,7 +553,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PS_AWAKE:
                mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-               if (!adapter->pps_uapsd_mode &&
+               if (!adapter->pps_uapsd_mode && priv->port_open &&
                    priv->media_connected && adapter->sleep_period.period) {
                                adapter->pps_uapsd_mode = true;
                                mwifiex_dbg(adapter, EVENT,
@@ -438,6 +632,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PORT_RELEASE:
                mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
+               priv->port_open = true;
                break;
 
        case EVENT_EXT_SCAN_REPORT:
@@ -573,6 +768,16 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                ret = mwifiex_parse_tdls_event(priv, adapter->event_skb);
                break;
 
+       case EVENT_TX_DATA_PAUSE:
+               mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+               mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+               break;
+
+       case EVENT_MULTI_CHAN_INFO:
+               mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+               mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+               break;
+
        case EVENT_TX_STATUS_REPORT:
                mwifiex_dbg(adapter, EVENT, "event: TX_STATUS Report\n");
                mwifiex_parse_tx_status_event(priv, adapter->event_body);
index 2faa1bc42abee2eb838d65c6c75c33a243f53875..aa3d3c5ed07b3671b8d9064ff020f8fab998efcd 100644 (file)
@@ -49,7 +49,7 @@ static void mwifiex_restore_tdls_packets(struct mwifiex_private *priv,
                tid = skb->priority;
                tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
 
-               if (status == TDLS_SETUP_COMPLETE) {
+               if (mwifiex_is_tdls_link_setup(status)) {
                        ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
                        ra_list->tdls_link = true;
                        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -355,6 +355,7 @@ static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
        extcap->ieee_hdr.len = 8;
        memset(extcap->ext_capab, 0, 8);
        extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+       extcap->ext_capab[3] |= WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH;
 
        if (priv->adapter->is_hw_11ac_capable)
                extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
@@ -1071,6 +1072,11 @@ mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
                        for (i = 0; i < MAX_NUM_TID; i++)
                                sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
                }
+               if (sta_ptr->tdls_cap.extcap.ext_capab[3] &
+                   WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH) {
+                       mwifiex_config_tdls_enable(priv);
+                       mwifiex_config_tdls_cs_params(priv);
+               }
 
                memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
                mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
@@ -1141,7 +1147,7 @@ int mwifiex_get_tdls_list(struct mwifiex_private *priv,
 
        spin_lock_irqsave(&priv->sta_list_spinlock, flags);
        list_for_each_entry(sta_ptr, &priv->sta_list, list) {
-               if (sta_ptr->tdls_status == TDLS_SETUP_COMPLETE) {
+               if (mwifiex_is_tdls_link_setup(sta_ptr->tdls_status)) {
                        ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
                        peer++;
                        count++;
@@ -1295,7 +1301,7 @@ void mwifiex_auto_tdls_update_peer_status(struct mwifiex_private *priv,
                        if ((link_status == TDLS_NOT_SETUP) &&
                            (peer->tdls_status == TDLS_SETUP_INPROGRESS))
                                peer->failure_count++;
-                       else if (link_status == TDLS_SETUP_COMPLETE)
+                       else if (mwifiex_is_tdls_link_setup(link_status))
                                peer->failure_count = 0;
 
                        peer->tdls_status = link_status;
@@ -1367,7 +1373,7 @@ void mwifiex_check_auto_tdls(unsigned long context)
 
                if (((tdls_peer->rssi >= MWIFIEX_TDLS_RSSI_LOW) ||
                     !tdls_peer->rssi) &&
-                   tdls_peer->tdls_status == TDLS_SETUP_COMPLETE) {
+                   mwifiex_is_tdls_link_setup(tdls_peer->tdls_status)) {
                        tdls_peer->tdls_status = TDLS_LINK_TEARDOWN;
                        mwifiex_dbg(priv->adapter, MSG,
                                    "teardown TDLS link,peer=%pM rssi=%d\n",
@@ -1416,3 +1422,67 @@ void mwifiex_clean_auto_tdls(struct mwifiex_private *priv)
                mwifiex_flush_auto_tdls_list(priv);
        }
 }
+
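+/* This function sends HostCmd_CMD_TDLS_CONFIG with ACT_TDLS_CS_ENABLE_CONFIG
+ * to enable or disable TDLS channel switching in firmware.
+ */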
+static int mwifiex_config_tdls(struct mwifiex_private *priv, u8 enable)
+{
+       struct mwifiex_tdls_config config;
+
+       config.enable = cpu_to_le16(enable);
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_ENABLE_CONFIG, 0, &config, true);
+}
+
+int mwifiex_config_tdls_enable(struct mwifiex_private *priv)
+{
+       return mwifiex_config_tdls(priv, true);
+}
+
+int mwifiex_config_tdls_disable(struct mwifiex_private *priv)
+{
+       return mwifiex_config_tdls(priv, false);
+}
+
+int mwifiex_config_tdls_cs_params(struct mwifiex_private *priv)
+{
+       struct mwifiex_tdls_config_cs_params config_tdls_cs_params;
+
+       config_tdls_cs_params.unit_time = MWIFIEX_DEF_CS_UNIT_TIME;
+       config_tdls_cs_params.thr_otherlink = MWIFIEX_DEF_CS_THR_OTHERLINK;
+       config_tdls_cs_params.thr_directlink = MWIFIEX_DEF_THR_DIRECTLINK;
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_PARAMS, 0,
+                               &config_tdls_cs_params, true);
+}
+
+int mwifiex_stop_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac)
+{
+       struct mwifiex_tdls_stop_cs_params stop_tdls_cs_params;
+
+       ether_addr_copy(stop_tdls_cs_params.peer_mac, peer_mac);
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_STOP, 0,
+                               &stop_tdls_cs_params, true);
+}
+
+int mwifiex_start_tdls_cs(struct mwifiex_private *priv, const u8 *peer_mac,
+                         u8 primary_chan, u8 second_chan_offset, u8 band)
+{
+       struct mwifiex_tdls_init_cs_params start_tdls_cs_params;
+
+       ether_addr_copy(start_tdls_cs_params.peer_mac, peer_mac);
+       start_tdls_cs_params.primary_chan = primary_chan;
+       start_tdls_cs_params.second_chan_offset = second_chan_offset;
+       start_tdls_cs_params.band = band;
+
+       start_tdls_cs_params.switch_time = cpu_to_le16(MWIFIEX_DEF_CS_TIME);
+       start_tdls_cs_params.switch_timeout =
+                                       cpu_to_le16(MWIFIEX_DEF_CS_TIMEOUT);
+       start_tdls_cs_params.reg_class = MWIFIEX_DEF_CS_REG_CLASS;
+       start_tdls_cs_params.periodicity = MWIFIEX_DEF_CS_PERIODICITY;
+
+       return mwifiex_send_cmd(priv, HostCmd_CMD_TDLS_CONFIG,
+                               ACT_TDLS_CS_INIT, 0,
+                               &start_tdls_cs_params, true);
+}
index 5ed9b794053e760163dda372f205fd28463a9efe..8b1e5b5d47feee82a634a8de5856a94a135884fe 100644 (file)
@@ -370,8 +370,28 @@ void mwifiex_parse_tx_status_event(struct mwifiex_private *priv,
                        /* consumes ack_skb */
                        skb_complete_wifi_ack(ack_skb, !tx_status->status);
                } else {
+                       /* Remove broadcast address which was added by driver */
+                       memmove(ack_skb->data +
+                               sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16),
+                               ack_skb->data +
+                               sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+                               ETH_ALEN, ack_skb->len -
+                               (sizeof(struct ieee80211_hdr_3addr) +
+                               MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(u16) +
+                               ETH_ALEN));
+                       ack_skb->len = ack_skb->len - ETH_ALEN;
+                       /* Remove driver's proprietary header including 2 bytes
+                        * of packet length and pass actual management frame buffer
+                        * to cfg80211.
+                        */
                        cfg80211_mgmt_tx_status(&priv->wdev, tx_info->cookie,
-                                               ack_skb->data, ack_skb->len,
+                                               ack_skb->data +
+                                               MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+                                               sizeof(u16), ack_skb->len -
+                                               (MWIFIEX_MGMT_FRAME_HEADER_SIZE
+                                                + sizeof(u16)),
                                                !tx_status->status, GFP_ATOMIC);
                        dev_kfree_skb_any(ack_skb);
                }
index b74930054b8c0b1f1cc2066ef2d22f2ca542ebb4..4d5a6e3b6361700c9bcab06201a70cd29d38b66e 100644 (file)
@@ -808,7 +808,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
                             struct mwifiex_uap_bss_param *bss_cfg,
                             struct cfg80211_chan_def chandef)
 {
-       u8 config_bands = 0;
+       u8 config_bands = 0, old_bands = priv->adapter->config_bands;
 
        priv->bss_chandef = chandef;
 
@@ -834,6 +834,11 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
        }
 
        priv->adapter->config_bands = config_bands;
+
+       if (old_bands != config_bands) {
+               mwifiex_send_domain_info_cmd_fw(priv->adapter->wiphy);
+               mwifiex_dnld_txpwr_table(priv);
+       }
 }
 
 int mwifiex_config_start_uap(struct mwifiex_private *priv,
index 7bc1f850e3b7195302577738ef3c546f4fddd92a..492a8b3c636e2bcaac9b0fb81620bbf1d1327cd2 100644 (file)
@@ -176,6 +176,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
        case EVENT_UAP_BSS_IDLE:
                priv->media_connected = false;
+               priv->port_open = false;
                if (netif_carrier_ok(priv->netdev))
                        netif_carrier_off(priv->netdev);
                mwifiex_stop_net_dev_queue(priv->netdev, adapter);
@@ -185,6 +186,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                break;
        case EVENT_UAP_BSS_ACTIVE:
                priv->media_connected = true;
+               priv->port_open = true;
                if (!netif_carrier_ok(priv->netdev))
                        netif_carrier_on(priv->netdev);
                mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
@@ -192,6 +194,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
        case EVENT_UAP_BSS_START:
                mwifiex_dbg(adapter, EVENT,
                            "AP EVENT: event id: %#x\n", eventcause);
+               priv->port_open = false;
                memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
                       ETH_ALEN);
                if (priv->hist_data)
@@ -297,6 +300,16 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
                mwifiex_bt_coex_wlan_param_update_event(priv,
                                                        adapter->event_skb);
                break;
+       case EVENT_TX_DATA_PAUSE:
+               mwifiex_dbg(adapter, EVENT, "event: TX DATA PAUSE\n");
+               mwifiex_process_tx_pause_event(priv, adapter->event_skb);
+               break;
+
+       case EVENT_MULTI_CHAN_INFO:
+               mwifiex_dbg(adapter, EVENT, "event: multi-chan info\n");
+               mwifiex_process_multi_chan_event(priv, adapter->event_skb);
+               break;
+
        default:
                mwifiex_dbg(adapter, EVENT,
                            "event: unknown event id: %#x\n", eventcause);
index aada93425f806a74b481e937d7f25fab5a68d4d3..fbad99c503078ab9e2e840da438892a2067e92aa 100644 (file)
@@ -244,9 +244,11 @@ setup_for_next:
        if (card->rx_cmd_ep == context->ep) {
                mwifiex_usb_submit_rx_urb(context, size);
        } else {
-               context->skb = NULL;
-               if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING)
+               if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) {
                        mwifiex_usb_submit_rx_urb(context, size);
+               } else {
+                       context->skb = NULL;
+               }
        }
 
        return;
index 790e61953abffc8218ea3ecc6ec5cc2d4d9c630f..2504e422364a526246581a963969bcf87e4fc01b 100644 (file)
@@ -531,6 +531,65 @@ mwifiex_get_sta_entry(struct mwifiex_private *priv, const u8 *mac)
        return NULL;
 }
 
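+/* This function returns the first station entry whose TDLS status matches
+ * the given state, or NULL if there is none.
+ */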
+static struct mwifiex_sta_node *
+mwifiex_get_tdls_sta_entry(struct mwifiex_private *priv, u8 status)
+{
+       struct mwifiex_sta_node *node;
+
+       list_for_each_entry(node, &priv->sta_list, list) {
+               if (node->tdls_status == status)
+                       return node;
+       }
+
+       return NULL;
+}
+
+/* If tdls channel switching is on-going, tx data traffic should be
+ * blocked until the switching stage has completed.
+ */
+u8 mwifiex_is_tdls_chan_switching(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *sta_ptr;
+
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return false;
+
+       sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_CHAN_SWITCHING);
+       if (sta_ptr)
+               return true;
+
+       return false;
+}
+
+u8 mwifiex_is_tdls_off_chan(struct mwifiex_private *priv)
+{
+       struct mwifiex_sta_node *sta_ptr;
+
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return false;
+
+       sta_ptr = mwifiex_get_tdls_sta_entry(priv, TDLS_IN_OFF_CHAN);
+       if (sta_ptr)
+               return true;
+
+       return false;
+}
+
+/* If tdls channel switching is on-going or the tdls link operates on the
+ * off-channel, the cmd path should be blocked until tdls has switched back
+ * to the base channel.
+ */
+u8 mwifiex_is_send_cmd_allowed(struct mwifiex_private *priv)
+{
+       if (!priv || !ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+               return true;
+
+       if (mwifiex_is_tdls_chan_switching(priv) ||
+           mwifiex_is_tdls_off_chan(priv))
+               return false;
+
+       return true;
+}
+
 /* This function will add a sta_node entry to associated station list
  * table with the given mac address.
  * If entry exist already, existing entry is returned.
index a8ea21c3340c73537c8f597ad6dc2d176b45f588..173d3663c2e042bfe44e680e04acf86cf43f7c30 100644 (file)
@@ -160,9 +160,10 @@ void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
                ra_list->tdls_link = false;
                ra_list->ba_status = BA_SETUP_NONE;
                ra_list->amsdu_in_ampdu = false;
+               ra_list->tx_paused = false;
                if (!mwifiex_queuing_ra_based(priv)) {
-                       if (mwifiex_get_tdls_link_status(priv, ra) ==
-                           TDLS_SETUP_COMPLETE) {
+                       if (mwifiex_is_tdls_link_setup
+                               (mwifiex_get_tdls_link_status(priv, ra))) {
                                ra_list->tdls_link = true;
                                ra_list->is_11n_enabled =
                                        mwifiex_tdls_peer_11n_enabled(priv, ra);
@@ -448,6 +449,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
        }
 }
 
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
+{
+       return atomic_read(&adapter->bypass_tx_pending) ? false : true;
+}
+
 /*
  * This function checks if WMM Tx queue is empty.
  */
@@ -459,6 +465,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
 
        for (i = 0; i < adapter->priv_num; ++i) {
                priv = adapter->priv[i];
+               if (priv && !priv->port_open)
+                       continue;
                if (priv && atomic_read(&priv->wmm.tx_pkts_queued))
                        return false;
        }
@@ -580,6 +588,10 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
        skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
                mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 
+       skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
+               mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+       atomic_set(&priv->adapter->bypass_tx_pending, 0);
+
        idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
        idr_destroy(&priv->ack_status_frames);
 }
@@ -603,6 +615,88 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
        return NULL;
 }
 
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+                                   u8 tx_pause)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       u32 pkt_cnt = 0, tx_pkts_queued;
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; ++i) {
+               ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
+               if (ra_list && ra_list->tx_paused != tx_pause) {
+                       pkt_cnt += ra_list->total_pkt_count;
+                       ra_list->tx_paused = tx_pause;
+                       if (tx_pause)
+                               priv->wmm.pkts_paused[i] +=
+                                       ra_list->total_pkt_count;
+                       else
+                               priv->wmm.pkts_paused[i] -=
+                                       ra_list->total_pkt_count;
+               }
+       }
+
+       if (pkt_cnt) {
+               tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+               if (tx_pause)
+                       tx_pkts_queued -= pkt_cnt;
+               else
+                       tx_pkts_queued += pkt_cnt;
+
+               atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+               atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+       }
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+/* This function updates the non-TDLS peers' ralist tx_pause while
+ * TDLS channel switching is in progress.
+ */
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+                                              u8 *mac, u8 tx_pause)
+{
+       struct mwifiex_ra_list_tbl *ra_list;
+       u32 pkt_cnt = 0, tx_pkts_queued;
+       unsigned long flags;
+       int i;
+
+       spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+       for (i = 0; i < MAX_NUM_TID; ++i) {
+               list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
+                                   list) {
+                       if (!memcmp(ra_list->ra, mac, ETH_ALEN))
+                               continue;
+
+                       if (ra_list && ra_list->tx_paused != tx_pause) {
+                               pkt_cnt += ra_list->total_pkt_count;
+                               ra_list->tx_paused = tx_pause;
+                               if (tx_pause)
+                                       priv->wmm.pkts_paused[i] +=
+                                               ra_list->total_pkt_count;
+                               else
+                                       priv->wmm.pkts_paused[i] -=
+                                               ra_list->total_pkt_count;
+                       }
+               }
+       }
+
+       if (pkt_cnt) {
+               tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
+               if (tx_pause)
+                       tx_pkts_queued -= pkt_cnt;
+               else
+                       tx_pkts_queued += pkt_cnt;
+
+               atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
+               atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
+       }
+       spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
 /*
  * This function retrieves an RA list node for a given TID and
  * RA address pair.
@@ -669,6 +763,18 @@ mwifiex_is_ralist_valid(struct mwifiex_private *priv,
        return false;
 }
 
+/*
+ * This function adds a packet to the bypass TX queue.
+ * This is a special TX queue for packets which can be sent even when
+ * port_open is false.
+ */
+void
+mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+                                  struct sk_buff *skb)
+{
+       skb_queue_tail(&priv->bypass_txq, skb);
+}
+
 /*
  * This function adds a packet to WMM queue.
  *
@@ -723,6 +829,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
            !mwifiex_is_skb_mgmt_frame(skb)) {
                switch (tdls_status) {
                case TDLS_SETUP_COMPLETE:
+               case TDLS_CHAN_SWITCHING:
+               case TDLS_IN_BASE_CHAN:
+               case TDLS_IN_OFF_CHAN:
                        ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
                                                              ra);
                        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
@@ -765,7 +874,10 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                atomic_set(&priv->wmm.highest_queued_prio,
                           priv->tos_to_tid_inv[tid_down]);
 
-       atomic_inc(&priv->wmm.tx_pkts_queued);
+       if (ra_list->tx_paused)
+               priv->wmm.pkts_paused[tid_down]++;
+       else
+               atomic_inc(&priv->wmm.tx_pkts_queued);
 
        spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
@@ -970,7 +1082,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 
                        priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-                       if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
+                       if (!priv_tmp->port_open ||
+                           (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
                                continue;
 
                        /* iterate over the WMM queues of the BSS */
@@ -987,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
                                list_for_each_entry(ptr, &tid_ptr->ra_list,
                                                    list) {
 
-                                       if (!skb_queue_empty(&ptr->skb_head))
+                                       if (!ptr->tx_paused &&
+                                           !skb_queue_empty(&ptr->skb_head))
                                                /* holds both locks */
                                                goto found;
                                }
@@ -1339,6 +1453,38 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
+{
+       struct mwifiex_tx_param tx_param;
+       struct sk_buff *skb;
+       struct mwifiex_txinfo *tx_info;
+       struct mwifiex_private *priv;
+       int i;
+
+       if (adapter->data_sent || adapter->tx_lock_flag)
+               return;
+
+       for (i = 0; i < adapter->priv_num; ++i) {
+               priv = adapter->priv[i];
+
+               if (skb_queue_empty(&priv->bypass_txq))
+                       continue;
+
+               skb = skb_dequeue(&priv->bypass_txq);
+               tx_info = MWIFIEX_SKB_TXCB(skb);
+
+               /* no aggregation for bypass packets */
+               tx_param.next_pkt_len = 0;
+
+               if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
+                       skb_queue_head(&priv->bypass_txq, skb);
+                       tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
+               } else {
+                       atomic_dec(&adapter->bypass_tx_pending);
+               }
+       }
+}
+
 /*
  * This function transmits the highest priority packet awaiting in the
  * WMM Queues.
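
A hedged sketch (illustrative, not from the patch) of the enqueue side that pairs with mwifiex_process_bypass_tx() above; the skb is assumed to come from the caller's TX path:

static void mwifiex_example_queue_bypass(struct mwifiex_private *priv,
					 struct sk_buff *skb)
{
	/* Bypass packets may be sent even while port_open is false;
	 * mwifiex_process_bypass_tx() later drains the queue and
	 * decrements the counter incremented here.
	 */
	mwifiex_wmm_add_buf_bypass_txqueue(priv, skb);
	atomic_inc(&priv->adapter->bypass_tx_pending);
}
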
index 48ece0b355919d3c3a4278dfc1727fc391f5848d..38f09762bd2f93ba90ef341ac5d0deda575046dc 100644 (file)
@@ -99,12 +99,16 @@ mwifiex_wmm_is_ra_list_empty(struct list_head *ra_list_hhead)
 
 void mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
                                 struct sk_buff *skb);
+void mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
+                                       struct sk_buff *skb);
 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra);
 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
                              struct mwifiex_ra_list_tbl *ra, int tid);
 
 int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter);
+int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter);
 void mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter);
+void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter);
 int mwifiex_is_ralist_valid(struct mwifiex_private *priv,
                            struct mwifiex_ra_list_tbl *ra_list, int tid);
 
@@ -126,6 +130,10 @@ struct mwifiex_ra_list_tbl *
 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
                            const u8 *ra_addr);
 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
+void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
+                                   u8 tx_pause);
+void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
+                                              u8 *mac, u8 tx_pause);
 
 struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private
                                        *priv, u8 tid, const u8 *ra_addr);
index c940a87175ca15d701bad48abbe2be5dd755091c..74a479ac323d53716e5e9b02f7d179d4370ae5c5 100644 (file)
 /*-------------------------------------------------------------------------
  *     Chip specific
  *-------------------------------------------------------------------------*/
-#define CHIP_8723                      BIT(2) /* RTL8723 With BT feature */
-#define CHIP_8723_DRV_REV              BIT(3) /* RTL8723 Driver Revised */
 #define NORMAL_CHIP                    BIT(4)
 #define CHIP_VENDOR_UMC                        BIT(5)
 #define CHIP_VENDOR_UMC_B_CUT          BIT(6)
 
-#define IS_8723_SERIES(version)                \
-       (((version) & CHIP_8723) ? true : false)
-
 #define IS_92C_1T2R(version)           \
        (((version) & CHIP_92C) && ((version) & CHIP_92C_1T2R))
 
 #define IS_VENDOR_UMC(version)         \
        (((version) & CHIP_VENDOR_UMC) ? true : false)
 
-#define IS_VENDOR_8723_A_CUT(version)  \
-       (((version) & CHIP_VENDOR_UMC) ? (((version) & (BIT(6))) ? \
-       false : true) : false)
-
 #define CHIP_BONDING_92C_1T2R  0x1
 #define CHIP_BONDING_IDENTIFIER(_value)        (((_value) >> 22) & 0x3)
index 767358a553fb083dcc39297fe6f7ef6be74cbad4..7cf36619f25005e4395251ba663b2b15cd14e41a 100644 (file)
@@ -2280,7 +2280,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
        u8 u1tmp = 0;
        bool actuallyset = false;
@@ -2357,20 +2356,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
                if (ppsc->pwrdown_mode && e_rfpowerstate_toset == ERFOFF) {
                        /* Enable register area 0x0-0xc. */
                        rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0);
-                       if (IS_HARDWARE_TYPE_8723U(rtlhal)) {
-                               /*
-                                * We should configure HW PDn source for WiFi
-                                * ONLY, and then our HW will be set in
-                                * power-down mode if PDn source from all
-                                * functions are configured.
-                                */
-                               u1tmp = rtl_read_byte(rtlpriv,
-                                                     REG_MULTI_FUNC_CTRL);
-                               rtl_write_byte(rtlpriv, REG_MULTI_FUNC_CTRL,
-                                              (u1tmp|WL_HWPDN_EN));
-                       } else {
-                               rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
-                       }
+                       rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x8812);
                }
                if (e_rfpowerstate_toset == ERFOFF) {
                        if (ppsc->reg_rfps_level  & RT_RF_OFF_LEVL_ASPM)
index 490a7cf7c702fa263b0f392b2ed103d0c4df6848..1c55a002d4bd9d9cc48986483a29cbfd5228b0ab 100644 (file)
@@ -69,8 +69,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
                chip_version = NORMAL_CHIP;
                chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
                chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
-               /* RTL8723 with BT function. */
-               chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
                if (IS_VENDOR_UMC(chip_version))
                        chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
                                         CHIP_VENDOR_UMC_B_CUT : 0);
@@ -78,10 +76,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
                        value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
                        chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
                                 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
-               } else if (IS_8723_SERIES(chip_version)) {
-                       value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
-                       chip_version |= ((value32 & RF_RL_ID) ?
-                                         CHIP_8723_DRV_REV : 0);
                }
        }
        rtlhal->version  = (enum version_8192c)chip_version;
@@ -114,12 +108,6 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
        case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
                versionid = "NORMAL_UMC_CHIP_88C_B_CUT";
                break;
-       case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
-               versionid = "NORMAL_UMC_CHIP_8723_1T1R_A_CUT";
-               break;
-       case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
-               versionid = "NORMAL_UMC_CHIP_8723_1T1R_B_CUT";
-               break;
        case VERSION_TEST_CHIP_92C:
                versionid = "TEST_CHIP_92C";
                break;
index 1961b8e28dc16eb7261bc267641cf64b06791b77..bb06fe836fe753037925bf18dcfa6f8d4186ffb8 100644 (file)
@@ -3515,14 +3515,14 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
        for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
             rfpath++) {
                if (rtlhal->current_bandtype == BAND_ON_2_4G) {
-                       /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+                       /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
                        rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
                                      BIT(18), 0);
                        /* RF0x0b[16:14] =3b'111 */
                        rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
                                      0x1c000, 0x07);
                } else {
-                       /* MOD_AG for RF paht_A 0x18 BIT8,BIT16 */
+                       /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
                        rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) |
                                      BIT(16) | BIT(18),
                                      (BIT(16) | BIT(8)) >> 8);
index 3236d44b459df69efd4f2b363f4b651e478a10a4..b7f18e2155eb18358cf4d4f9f3f82774f9b6f522 100644 (file)
@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
 
        rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
-       if ((bt_msr & 0xfc) == MSR_AP)
+       if ((bt_msr & MSR_MASK) == MSR_AP)
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
        else
                rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
index 53668fc8f23e211dab03085359ce013fc65b81c2..1d6110f9c1fb6e29e75e8bde9c1533b026a8b757 100644 (file)
 #define        MSR_ADHOC                               0x01
 #define        MSR_INFRA                               0x02
 #define        MSR_AP                                  0x03
+#define MSR_MASK                               0x03
 
 #define        RRSR_RSC_OFFSET                         21
 #define        RRSR_SHORT_OFFSET                       23
index e125974285cc890e0b40671886815c996ef37a11..7df672a84530b0217378d8f9b66c2e10d31e044e 100644 (file)
@@ -74,7 +74,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
        if (desc->rate <= wl->hw_min_ht_rate)
                status->flag |= RX_FLAG_HT;
 
-       status->signal = desc->rssi;
+       status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
+       status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
 
        /*
         * FIXME: In wl1251, the SNR should be divided by two.  In wl1271 we
index a3b1618db27c202db4377c8e529aee4751c8c32a..f5a7087cfb97831fea0296186d59b3acbc0d7ac5 100644 (file)
@@ -30,6 +30,9 @@
 #define WL1271_RX_MAX_RSSI -30
 #define WL1271_RX_MIN_RSSI -95
 
+#define RSSI_LEVEL_BITMASK     0x7F
+#define ANT_DIVERSITY_BITMASK  BIT(7)
+
 #define SHORT_PREAMBLE_BIT   BIT(0)
 #define OFDM_RATE_BIT        BIT(6)
 #define PBCC_RATE_BIT        BIT(7)
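
A worked example of the rx.c unpacking above, using an illustrative raw byte (the helper and value are not part of the patch):

static void example_decode_rssi(u8 raw, s8 *signal, u8 *antenna)
{
	/* e.g. raw = 0xc5: antenna 1, signal 0xc5 read as s8 == -59 dBm */
	*signal = (raw & RSSI_LEVEL_BITMASK) | BIT(7);
	*antenna = (raw & ANT_DIVERSITY_BITMASK) >> 7;
}
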
index ea7e07abca4ebaccd37eaa1aac75567453b3e0a0..c172da56b550b8a59f88f5206a0160c1a878f5bb 100644 (file)
@@ -293,7 +293,8 @@ static int wl1271_probe(struct sdio_func *func,
        /* Use block mode for transferring over one block size of data */
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
-       if (wlcore_probe_of(&func->dev, &irq, &pdev_data))
+       ret = wlcore_probe_of(&func->dev, &irq, &pdev_data);
+       if (ret)
                goto out_free_glue;
 
        /* if sdio can keep power while host is suspended, enable wow */
index 8a495b318b6f23bf66b19f4d77e557506cab5b0f..c6cb85a85c896fd6dcab466fa80f6dc2b81e8e04 100644 (file)
@@ -325,9 +325,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
                queue->pending_prod + queue->pending_cons;
 }
 
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
-
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
index fdc60db608291b7e1fd81c6a3d774ba9c9c77467..7c8c23cc6896ca300e31c1d99801e168cb3615de 100644 (file)
@@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach);
 bool of_phy_is_fixed_link(struct device_node *np)
 {
        struct device_node *dn;
-       int len;
+       int len, err;
+       const char *managed;
 
        /* New binding */
        dn = of_get_child_by_name(np, "fixed-link");
@@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
                return true;
        }
 
+       err = of_property_read_string(np, "managed", &managed);
+       if (err == 0 && strcmp(managed, "auto") != 0)
+               return true;
+
        /* Old binding */
        if (of_get_property(np, "fixed-link", &len) &&
            len == (5 * sizeof(__be32)))
@@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np)
        struct fixed_phy_status status = {};
        struct device_node *fixed_link_node;
        const __be32 *fixed_link_prop;
-       int len;
+       int len, err;
        struct phy_device *phy;
+       const char *managed;
+
+       err = of_property_read_string(np, "managed", &managed);
+       if (err == 0) {
+               if (strcmp(managed, "in-band-status") == 0) {
+                       /* status is zeroed, namely its .link member */
+                       phy = fixed_phy_register(PHY_POLL, &status, np);
+                       return IS_ERR(phy) ? PTR_ERR(phy) : 0;
+               }
+       }
 
        /* New binding */
        fixed_link_node = of_get_child_by_name(np, "fixed-link");
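
For illustration, a hedged sketch of a MAC driver probe path that relies on the new "managed" property handling above; setup_mac_link() is a placeholder for the driver's own link setup:

static int example_probe_link(struct device_node *np)
{
	if (of_phy_is_fixed_link(np)) {
		/* Covers fixed-link nodes and managed = "in-band-status". */
		int err = of_phy_register_fixed_link(np);

		if (err)
			return err;
	}

	return setup_mac_link(np);	/* placeholder */
}
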
index 4383476a0d4814beb6c03d32feb52e8ce94a0014..139d6d2e123fb0c69bbe31705b08b637c020ac8d 100644 (file)
@@ -192,5 +192,7 @@ extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
+extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
 #endif /* _LINUX_BPF_H */
index 17724f6ea983c9c5ac8fecb2069ec1d90b451168..fa2cab985e577681c801f8861c299ad938ec9ffc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/printk.h>
 #include <linux/workqueue.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@@ -354,6 +355,16 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
                   offsetof(struct bpf_prog, insns[proglen]));
 }
 
+static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
+{
+       /* When classic BPF programs have been loaded and the arch
+        * does not have a classic BPF JIT (anymore), they have been
+        * converted via bpf_migrate_filter() to eBPF and thus always
+        * have an unspec program type.
+        */
+       return prog->type == BPF_PROG_TYPE_UNSPEC;
+}
+
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 #ifdef CONFIG_DEBUG_SET_MODULE_RONX
@@ -411,6 +422,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_int_jit_compile(struct bpf_prog *fp);
+bool bpf_helper_changes_skb_data(void *func);
 
 #ifdef CONFIG_BPF_JIT
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -427,8 +439,9 @@ void bpf_jit_free(struct bpf_prog *fp);
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
 {
-       pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
-              flen, proglen, pass, image);
+       pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
+              proglen, pass, image, current->comm, task_pid_nr(current));
+
        if (image)
                print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
                               16, 1, image, proglen, false);
index 82806c60aa4273d67ff5592dfc11e4eaa57c51d7..cb9dcad72372150f2ad0d76c229d5f63b07f74ff 100644 (file)
@@ -29,6 +29,7 @@ struct ipv6_devconf {
        __s32           max_desync_factor;
        __s32           max_addresses;
        __s32           accept_ra_defrtr;
+       __s32           accept_ra_min_hop_limit;
        __s32           accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
        __s32           accept_ra_rtr_pref;
@@ -57,6 +58,7 @@ struct ipv6_devconf {
                bool initialized;
                struct in6_addr secret;
        } stable_secret;
+       __s32           use_oif_addrs_only;
        void            *sysctl;
 };
 
@@ -94,7 +96,6 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb)
 struct inet6_skb_parm {
        int                     iif;
        __be16                  ra;
-       __u16                   hop;
        __u16                   dst0;
        __u16                   srcrt;
        __u16                   dst1;
@@ -111,6 +112,7 @@ struct inet6_skb_parm {
 #define IP6SKB_REROUTED                4
 #define IP6SKB_ROUTERALERT     8
 #define IP6SKB_FRAGMENTED      16
+#define IP6SKB_HOPBYHOP        32
 };
 
 #define IP6CB(skb)     ((struct inet6_skb_parm*)((skb)->cb))
index e7ecc12a11636f1afe2f591e019da293ed1f517a..09cebe528488e18c8561b27f2f18eade1f51c356 100644 (file)
@@ -88,7 +88,8 @@ struct mlx4_ts_cqe {
 
 enum {
        MLX4_CQE_L2_TUNNEL_IPOK         = 1 << 31,
-       MLX4_CQE_VLAN_PRESENT_MASK      = 1 << 29,
+       MLX4_CQE_CVLAN_PRESENT_MASK     = 1 << 29,
+       MLX4_CQE_SVLAN_PRESENT_MASK     = 1 << 30,
        MLX4_CQE_L2_TUNNEL              = 1 << 27,
        MLX4_CQE_L2_TUNNEL_CSUM         = 1 << 26,
        MLX4_CQE_L2_TUNNEL_IPV4         = 1 << 25,
index fd13c1ce3b4abf797a4a720c9dd567587a477274..bcbf8c72a77bee6ef2acc96489a4559bf02467ee 100644 (file)
@@ -211,6 +211,8 @@ enum {
        MLX4_DEV_CAP_FLAG2_ETS_CFG              = 1LL <<  26,
        MLX4_DEV_CAP_FLAG2_PORT_BEACON          = 1LL <<  27,
        MLX4_DEV_CAP_FLAG2_IGNORE_FCS           = 1LL <<  28,
+       MLX4_DEV_CAP_FLAG2_PHV_EN               = 1LL <<  29,
+       MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN      = 1LL <<  30,
 };
 
 enum {
@@ -581,6 +583,7 @@ struct mlx4_caps {
        u64                     phys_port_id[MLX4_MAX_PORTS + 1];
        int                     tunnel_offload_mode;
        u8                      rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
+       u8                      phv_bit[MLX4_MAX_PORTS + 1];
        u8                      alloc_res_qp_mask;
        u32                     dmfs_high_rate_qpn_base;
        u32                     dmfs_high_rate_qpn_range;
@@ -1332,6 +1335,8 @@ int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
                            u8 ignore_fcs_value);
 int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
+int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
+int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
 int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index 6fed539e54569c3f0701632a8a17e9bdf16a53d6..de45a51b3f041d28e644cc6a0ce011d6cd2d4fb6 100644 (file)
@@ -272,7 +272,8 @@ enum {
        MLX4_WQE_CTRL_SOLICITED         = 1 << 1,
        MLX4_WQE_CTRL_IP_CSUM           = 1 << 4,
        MLX4_WQE_CTRL_TCP_UDP_CSUM      = 1 << 5,
-       MLX4_WQE_CTRL_INS_VLAN          = 1 << 6,
+       MLX4_WQE_CTRL_INS_CVLAN         = 1 << 6,
+       MLX4_WQE_CTRL_INS_SVLAN         = 1 << 7,
        MLX4_WQE_CTRL_STRONG_ORDER      = 1 << 7,
        MLX4_WQE_CTRL_FORCE_LOOPBACK    = 1 << 0,
 };
index 5722d88c24290358a928914c48fafec9a886b1c7..5fe0cae1a515567fb59b42e9b7ba49e9033826f5 100644 (file)
@@ -380,7 +380,7 @@ struct mlx5_uar {
        u32                     index;
        struct list_head        bf_list;
        unsigned                free_bf_bmap;
-       void __iomem           *wc_map;
+       void __iomem           *bf_map;
        void __iomem           *map;
 };
 
@@ -435,6 +435,8 @@ struct mlx5_priv {
        struct mlx5_uuar_info   uuari;
        MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
+       struct io_mapping       *bf_mapping;
+
        /* pages stuff */
        struct workqueue_struct *pg_wq;
        struct rb_root          page_root;
@@ -463,6 +465,10 @@ struct mlx5_priv {
        /* end: mr staff */
 
        /* start: alloc staff */
+       /* protect buffer allocation according to NUMA node */
+       struct mutex            alloc_mutex;
+       int                     numa_node;
+
        struct mutex            pgdir_mutex;
        struct list_head        pgdir_list;
        /* end: alloc staff */
@@ -672,6 +678,8 @@ void mlx5_health_cleanup(void);
 void  __init mlx5_health_init(void);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
 void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+                       struct mlx5_buf *buf, int node);
 int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
 void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
 struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
@@ -773,6 +781,8 @@ void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+                      int node);
 void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
 
 const char *mlx5_command_str(int command);
index 6d2f6fee041cd4f663fd7d6898b8ed8418a95b73..c60a62bba652c112517abefdaf60433437780601 100644 (file)
@@ -1936,9 +1936,9 @@ enum {
 };
 
 enum {
-       MLX5_TIRC_RX_HASH_FN_HASH_NONE           = 0x0,
-       MLX5_TIRC_RX_HASH_FN_HASH_INVERTED_XOR8  = 0x1,
-       MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ       = 0x2,
+       MLX5_RX_HASH_FN_NONE           = 0x0,
+       MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+       MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
 };
 
 enum {
diff --git a/include/linux/mpls_iptunnel.h b/include/linux/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..ef29eb2
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_IPTUNNEL_H
+#define _LINUX_MPLS_IPTUNNEL_H
+
+#include <uapi/linux/mpls_iptunnel.h>
+
+#endif  /* _LINUX_MPLS_IPTUNNEL_H */
index e20979dfd6a99688a696779b8952ab66bc143739..607b5f41f46f93e506adbefd7b3ed11ed8acfb67 100644 (file)
@@ -766,6 +766,13 @@ struct netdev_phys_item_id {
        unsigned char id_len;
 };
 
+static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
+                                           struct netdev_phys_item_id *b)
+{
+       return a->id_len == b->id_len &&
+              memcmp(a->id, b->id, a->id_len) == 0;
+}
+
 typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb);
 
@@ -1041,6 +1048,12 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *     TX queue.
  * int (*ndo_get_iflink)(const struct net_device *dev);
  *     Called to get the iflink value of this device.
+ * int (*ndo_change_proto_down)(struct net_device *dev,
+ *                              bool proto_down);
+ *     This function is used to pass protocol port error state information
+ *     to the switch driver. The switch driver can react to the proto_down
+ *     by doing a phys down on the associated switch port.
+ *
  */
 struct net_device_ops {
        int                     (*ndo_init)(struct net_device *dev);
@@ -1211,6 +1224,8 @@ struct net_device_ops {
                                                      int queue_index,
                                                      u32 maxrate);
        int                     (*ndo_get_iflink)(const struct net_device *dev);
+       int                     (*ndo_change_proto_down)(struct net_device *dev,
+                                                        bool proto_down);
 };
 
 /**
@@ -1448,6 +1463,8 @@ enum netdev_priv_flags {
  *
  *     @xps_maps:      XXX: need comments on this one
  *
+ *     @offload_fwd_mark:      Offload device forwarding mark
+ *
  *     @trans_start:           Time (in jiffies) of last Tx
  *     @watchdog_timeo:        Represents the timeout that is used by
  *                             the watchdog ( see dev_watchdog() )
@@ -1502,6 +1519,10 @@ enum netdev_priv_flags {
  *
  *     @qdisc_tx_busylock:     XXX: need comments on this one
  *
+ *     @proto_down:    protocol port state information can be sent to the
+ *                     switch driver and used to set the phys state of the
+ *                     switch port.
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -1685,6 +1706,10 @@ struct net_device {
        struct xps_dev_maps __rcu *xps_maps;
 #endif
 
+#ifdef CONFIG_NET_SWITCHDEV
+       u32                     offload_fwd_mark;
+#endif
+
        /* These may be needed for future network-power-down code. */
 
        /*
@@ -1762,6 +1787,7 @@ struct net_device {
 #endif
        struct phy_device *phydev;
        struct lock_class_key *qdisc_tx_busylock;
+       bool proto_down;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -2982,6 +3008,7 @@ int dev_get_phys_port_id(struct net_device *dev,
                         struct netdev_phys_item_id *ppid);
 int dev_get_phys_port_name(struct net_device *dev,
                           char *name, size_t len);
+int dev_change_proto_down(struct net_device *dev, bool proto_down);
 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
 struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
                                    struct netdev_queue *txq, int *ret);
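
A brief, hedged sketch of how a protocol driver might use the proto_down plumbing declared above; example_report_port_error() is illustrative and assumes the port's driver may not implement ndo_change_proto_down:

static void example_report_port_error(struct net_device *port_dev, bool bad)
{
	/* Ask the lower/switch driver to reflect the error state on the
	 * physical port; a negative return means it is unsupported.
	 */
	if (dev_change_proto_down(port_dev, bad))
		netdev_warn(port_dev, "proto_down not supported\n");
}
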
index a26c3f84b8ddc6c15e2abbecf47a588419534b11..e5fb1d4159619f7ecad0fca5515e627fd7016e06 100644 (file)
@@ -424,6 +424,8 @@ struct phy_device {
 
        struct net_device *attached_dev;
 
+       u8 mdix;
+
        void (*adjust_link)(struct net_device *dev);
 };
 #define to_phy_device(d) container_of(d, struct phy_device, dev)
index d6cdd6e87d53bcd1b4f390f61f73b1c91b076bdd..b7c1286e247d9c50951ad8d9d1256ccc9d691481 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/flow_dissector.h>
 #include <linux/splice.h>
 #include <linux/in6.h>
+#include <net/flow.h>
 
 /* A. Checksumming of received packets by device.
  *
@@ -506,6 +507,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
   *    @napi_id: id of the NAPI struct this skb came from
  *     @secmark: security marking
+ *     @offload_fwd_mark: forwarding offload mark
  *     @mark: Generic packet mark
  *     @vlan_proto: vlan encapsulation protocol
  *     @vlan_tci: vlan tag control information
@@ -650,9 +652,15 @@ struct sk_buff {
                unsigned int    sender_cpu;
        };
 #endif
+       union {
 #ifdef CONFIG_NETWORK_SECMARK
-       __u32                   secmark;
+               __u32           secmark;
+#endif
+#ifdef CONFIG_NET_SWITCHDEV
+               __u32           offload_fwd_mark;
 #endif
+       };
+
        union {
                __u32           mark;
                __u32           reserved_tailroom;
@@ -938,6 +946,26 @@ static inline __u32 skb_get_hash(struct sk_buff *skb)
        return skb->hash;
 }
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6);
+
+static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+       if (!skb->l4_hash && !skb->sw_hash)
+               __skb_get_hash_flowi6(skb, fl6);
+
+       return skb->hash;
+}
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl);
+
+static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+       if (!skb->l4_hash && !skb->sw_hash)
+               __skb_get_hash_flowi4(skb, fl4);
+
+       return skb->hash;
+}
+
 __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
@@ -2671,12 +2699,6 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
        skb_shinfo(skb)->frag_list = NULL;
 }
 
-static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
-{
-       frag->next = skb_shinfo(skb)->frag_list;
-       skb_shinfo(skb)->frag_list = frag;
-}
-
 #define skb_walk_frags(skb, iter)      \
        for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
 
@@ -3468,5 +3490,6 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
                               skb_network_header(skb);
        return hdr_len + skb_gso_transport_seglen(skb);
 }
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_SKBUFF_H */
index c735f5c91eead34520726a503538ec48046dde86..eead8ab93c0a36e402741ee767d3c3bc70128964 100644 (file)
@@ -119,30 +119,8 @@ struct plat_stmmacenet_data {
        int rx_fifo_size;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
-       void *custom_cfg;
-       void *custom_data;
        void *bsp_priv;
 };
-
-/* of_data for SoC glue layer device tree bindings */
-
-struct stmmac_of_data {
-       int has_gmac;
-       int enh_desc;
-       int tx_coe;
-       int rx_coe;
-       int bugged_jumbo;
-       int pmt;
-       int riwt_off;
-       void (*fix_mac_speed)(void *priv, unsigned int speed);
-       void (*bus_setup)(void __iomem *ioaddr);
-       void *(*setup)(struct platform_device *pdev);
-       void (*free)(struct platform_device *pdev, void *priv);
-       int (*init)(struct platform_device *pdev, void *priv);
-       void (*exit)(struct platform_device *pdev, void *priv);
-};
 #endif
index 931738bc5bba3c999ef280f1f68e9256bf445233..4519c81304bd26ec6a6fe6520ecd8a1caeaba4dd 100644 (file)
@@ -21,6 +21,8 @@ struct tcf_common {
        struct gnet_stats_rate_est64    tcfc_rate_est;
        spinlock_t                      tcfc_lock;
        struct rcu_head                 tcfc_rcu;
+       struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+       struct gnet_stats_queue __percpu *cpu_qstats;
 };
 #define tcf_head       common.tcfc_head
 #define tcf_index      common.tcfc_index
@@ -68,6 +70,17 @@ static inline void tcf_hashinfo_destroy(struct tcf_hashinfo *hf)
        kfree(hf->htab);
 }
 
+/* Update lastuse only if needed, to avoid dirtying a cache line.
+ * We use a temp variable to avoid fetching jiffies twice.
+ */
+static inline void tcf_lastuse_update(struct tcf_t *tm)
+{
+       unsigned long now = jiffies;
+
+       if (tm->lastuse != now)
+               tm->lastuse = now;
+}
+
 #ifdef CONFIG_NET_CLS_ACT
 
 #define ACT_P_CREATED 1
@@ -102,7 +115,7 @@ void tcf_hash_destroy(struct tc_action *a);
 u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
 int tcf_hash_check(u32 index, struct tc_action *a, int bind);
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-                   int size, int bind);
+                   int size, int bind, bool cpustats);
 void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
 void tcf_hash_insert(struct tc_action *a);
 
index def59d3a34d5e24bda47e4526f7050c73a164cd7..0c3ac5acb85f5d3ce0d4cc1520dbe399143975cd 100644 (file)
@@ -158,8 +158,8 @@ struct ipv6_stub {
                                 const struct in6_addr *addr);
        int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
                                 const struct in6_addr *addr);
-       int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
-                               struct flowi6 *fl6);
+       int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+                              struct dst_entry **dst, struct flowi6 *fl6);
        void (*udpv6_encap_enable)(void);
        void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
                              const struct in6_addr *daddr,
index 3bd618d3e55dcf8735fb878a1d59bc8cf18bcc2d..2a6b0919e23f71af5f4660fce0a349bfa09b2fd9 100644 (file)
@@ -1297,7 +1297,7 @@ static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
        if (max >= to_multiplier * 8)
                return -EINVAL;
 
-       max_latency = (to_multiplier * 8 / max) - 1;
+       max_latency = (to_multiplier * 4 / max) - 1;
        if (latency > 499 || latency > max_latency)
                return -EINVAL;
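
As a worked example of the corrected factor (values chosen for illustration): the interval maximum is in 1.25 ms units and the supervision timeout in 10 ms units, so requiring the timeout to cover (1 + latency) * interval * 2 reduces to (1 + latency) * max <= to_multiplier * 4. With to_multiplier = 100 (1 s) and max = 80 (100 ms), max_latency = (100 * 4 / 80) - 1 = 4; the previous factor of 8 covered only a single interval per latency period, omitting the doubling.
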
 
index 2239a37530922682008d9bbe312e6cbfbe97fd4d..c98afc08cc2612e046cd070b22d25aa18a88c457 100644 (file)
@@ -55,6 +55,8 @@
 #define L2CAP_INFO_TIMEOUT             msecs_to_jiffies(4000)
 #define L2CAP_MOVE_TIMEOUT             msecs_to_jiffies(4000)
 #define L2CAP_MOVE_ERTX_TIMEOUT                msecs_to_jiffies(60000)
+#define L2CAP_WAIT_ACK_POLL_PERIOD     msecs_to_jiffies(200)
+#define L2CAP_WAIT_ACK_TIMEOUT         msecs_to_jiffies(10000)
 
 #define L2CAP_A2MP_DEFAULT_MTU         670
 
index c28aca25320ebaa7c02172fbfee4827a5d207d6b..1797235cd590c361eb216d33558b95e441087d3e 100644 (file)
@@ -66,6 +66,7 @@ enum {
        BOND_OPT_AD_ACTOR_SYS_PRIO,
        BOND_OPT_AD_ACTOR_SYSTEM,
        BOND_OPT_AD_USER_PORT_KEY,
+       BOND_OPT_NUM_PEER_NOTIF_ALIAS,
        BOND_OPT_LAST
 };
 
index 290a9a69af0788794619b0ededc4a6ccfbab5e07..382f94b59f2f706eab23f2f6ebe9c2f007c5ed2e 100644 (file)
@@ -34,6 +34,8 @@ struct cfg802154_ops {
                                                           int type);
        void    (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
                                               struct net_device *dev);
+       int     (*suspend)(struct wpan_phy *wpan_phy);
+       int     (*resume)(struct wpan_phy *wpan_phy);
        int     (*add_virtual_intf)(struct wpan_phy *wpan_phy,
                                    const char *name,
                                    unsigned char name_assign_type,
index c15d39456e146196b24bafed159099baa57e03e9..ccd6d8bffa4d8d0744c70c3591f948d6520b634a 100644 (file)
@@ -49,9 +49,38 @@ static inline void sock_update_classid(struct sock *sk)
        if (classid != sk->sk_classid)
                sk->sk_classid = classid;
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+       u32 classid = task_cls_state(current)->classid;
+
+       /* Due to the nature of the classifier it is required to ignore all
+        * packets originating from softirq context as accessing `current'
+        * would lead to false results.
+        *
+        * This test assumes that all callers of dev_queue_xmit() explicitly
+        * disable bh. Knowing this, it is possible to detect softirq based
+        * calls by looking at the number of nested bh disable calls because
+        * softirqs always disable bh.
+        */
+       if (in_serving_softirq()) {
+               /* If there is an sk_classid we'll use that. */
+               if (!skb->sk)
+                       return 0;
+
+               classid = skb->sk->sk_classid;
+       }
+
+       return classid;
+}
 #else /* !CONFIG_CGROUP_NET_CLASSID */
 static inline void sock_update_classid(struct sock *sk)
 {
 }
+
+static inline u32 task_get_classid(const struct sk_buff *skb)
+{
+       return 0;
+}
 #endif /* CONFIG_CGROUP_NET_CLASSID */
 #endif  /* _NET_CLS_CGROUP_H */
index 2bc73f8a00a9c4d20848a578eca44b99cf1b7281..2578811cef5167e94269bb9967f6b025c824084a 100644 (file)
@@ -57,6 +57,7 @@ struct dst_entry {
 #define DST_FAKE_RTABLE                0x0040
 #define DST_XFRM_TUNNEL                0x0080
 #define DST_XFRM_QUEUE         0x0100
+#define DST_METADATA           0x0200
 
        unsigned short          pending_confirm;
 
@@ -356,6 +357,9 @@ static inline int dst_discard(struct sk_buff *skb)
 }
 void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref,
                int initial_obsolete, unsigned short flags);
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+             struct net_device *dev, int initial_ref, int initial_obsolete,
+             unsigned short flags);
 void __dst_free(struct dst_entry *dst);
 struct dst_entry *dst_destroy(struct dst_entry *dst);
 
@@ -457,7 +461,7 @@ static inline struct dst_entry *dst_check(struct dst_entry *dst, u32 cookie)
        return dst;
 }
 
-void dst_init(void);
+void dst_subsys_init(void);
 
 /* Flags for xfrm_lookup flags argument. */
 enum {
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
new file mode 100644 (file)
index 0000000..075f523
--- /dev/null
@@ -0,0 +1,56 @@
+#ifndef __NET_DST_METADATA_H
+#define __NET_DST_METADATA_H 1
+
+#include <linux/skbuff.h>
+#include <net/ip_tunnels.h>
+#include <net/dst.h>
+
+struct metadata_dst {
+       struct dst_entry                dst;
+       size_t                          opts_len;
+       union {
+               struct ip_tunnel_info   tun_info;
+       } u;
+};
+
+static inline struct metadata_dst *skb_metadata_dst(struct sk_buff *skb)
+{
+       struct metadata_dst *md_dst = (struct metadata_dst *) skb_dst(skb);
+
+       if (md_dst && md_dst->dst.flags & DST_METADATA)
+               return md_dst;
+
+       return NULL;
+}
+
+static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb,
+                                                    int family)
+{
+       struct metadata_dst *md_dst = skb_metadata_dst(skb);
+       struct rtable *rt;
+
+       if (md_dst)
+               return &md_dst->u.tun_info;
+
+       switch (family) {
+       case AF_INET:
+               rt = (struct rtable *)skb_dst(skb);
+               if (rt && rt->rt_lwtstate)
+                       return lwt_tun_info(rt->rt_lwtstate);
+               break;
+       }
+
+       return NULL;
+}
+
+static inline bool skb_valid_dst(const struct sk_buff *skb)
+{
+       struct dst_entry *dst = skb_dst(skb);
+
+       return dst && !(dst->flags & DST_METADATA);
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
+
+#endif /* __NET_DST_METADATA_H */
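
A hedged usage sketch for the helpers in this new header; handle_decap() is an illustrative name and the AF_INET-only handling mirrors what skb_tunnel_info() currently covers:

static int example_rx_tunnel(struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);

	if (!info)
		return -ENOENT;	/* no collected metadata on this skb */

	return handle_decap(skb, be64_to_cpu(info->key.tun_id));	/* illustrative */
}
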
index 903a55efbffe50a3507f79b1b5e4b49405f31aa8..4e8f804f45898aae228e7b9f60fdda9e3666271d 100644 (file)
@@ -19,6 +19,7 @@ struct fib_rule {
        u8                      action;
        /* 3 bytes hole, try to use */
        u32                     target;
+       __be64                  tun_id;
        struct fib_rule __rcu   *ctarget;
        struct net              *fr_net;
 
index 8109a159d1b3ba5ced3aa6d2bc10b9f01d274520..3098ae33a1784f920e26dd445a041e9deac1888e 100644 (file)
 
 #define LOOPBACK_IFINDEX       1
 
+struct flowi_tunnel {
+       __be64                  tun_id;
+};
+
 struct flowi_common {
        int     flowic_oif;
        int     flowic_iif;
@@ -30,6 +34,7 @@ struct flowi_common {
 #define FLOWI_FLAG_ANYSRC              0x01
 #define FLOWI_FLAG_KNOWN_NH            0x02
        __u32   flowic_secid;
+       struct flowi_tunnel flowic_tun_key;
 };
 
 union flowi_uli {
@@ -66,6 +71,7 @@ struct flowi4 {
 #define flowi4_proto           __fl_common.flowic_proto
 #define flowi4_flags           __fl_common.flowic_flags
 #define flowi4_secid           __fl_common.flowic_secid
+#define flowi4_tun_key         __fl_common.flowic_tun_key
 
        /* (saddr,daddr) must be grouped, same order as in IP header */
        __be32                  saddr;
@@ -95,6 +101,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
        fl4->flowi4_proto = proto;
        fl4->flowi4_flags = flags;
        fl4->flowi4_secid = 0;
+       fl4->flowi4_tun_key.tun_id = 0;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
@@ -165,6 +172,7 @@ struct flowi {
 #define flowi_proto    u.__fl_common.flowic_proto
 #define flowi_flags    u.__fl_common.flowic_flags
 #define flowi_secid    u.__fl_common.flowic_secid
+#define flowi_tun_key  u.__fl_common.flowic_tun_key
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
index b73c88a19dd408f0de41f87c80242816fac4b19d..b07d126694a7aa5d5910e2d4126522aebd602a98 100644 (file)
@@ -205,8 +205,8 @@ void inet_put_port(struct sock *sk);
 
 void inet_hashinfo_init(struct inet_hashinfo *h);
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw);
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw);
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk);
+void __inet_hash(struct sock *sk, struct sock *osk);
 void inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
index 360c4802288db91a38b435bcf5b5d2eb71a8cd1f..879d6e5a973b4ae1af54d6b0c6103c02ee774991 100644 (file)
@@ -100,10 +100,8 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 void inet_twsk_free(struct inet_timewait_sock *tw);
 void inet_twsk_put(struct inet_timewait_sock *tw);
 
-int inet_twsk_unhash(struct inet_timewait_sock *tw);
-
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
-                         struct inet_hashinfo *hashinfo);
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                          struct inet_hashinfo *hashinfo);
 
 struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                                           struct inet_timewait_death_row *dr,
@@ -113,7 +111,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
                           struct inet_hashinfo *hashinfo);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
-void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
 void inet_twsk_purge(struct inet_hashinfo *hashinfo,
                     struct inet_timewait_death_row *twdr, int family);
index d5fe9f2ab6996f0aa9482980ecc4cdc793d63a5a..bee5f3582e38873e8e773e31c8ccda454f249234 100644 (file)
@@ -370,22 +370,6 @@ static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 }
 
-static inline void inet_set_txhash(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct flow_keys keys;
-
-       memset(&keys, 0, sizeof(keys));
-
-       keys.addrs.v4addrs.src = inet->inet_saddr;
-       keys.addrs.v4addrs.dst = inet->inet_daddr;
-       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
-       keys.ports.src = inet->inet_sport;
-       keys.ports.dst = inet->inet_dport;
-
-       sk->sk_txhash = flow_hash_from_keys(&keys);
-}
-
 static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
 {
        const struct iphdr *iph = skb_gro_network_header(skb);
index 3b76849c190fc2ce79b59d07466a05182d2b99fe..276328e3daa64a0d493a141e5690eb6dfe18c805 100644 (file)
@@ -51,6 +51,8 @@ struct fib6_config {
        struct nlattr   *fc_mp;
 
        struct nl_info  fc_nlinfo;
+       struct nlattr   *fc_encap;
+       u16             fc_encap_type;
 };
 
 struct fib6_node {
@@ -131,6 +133,7 @@ struct rt6_info {
        /* more non-fragment space at head required */
        unsigned short                  rt6i_nfheader_len;
        u8                              rt6i_protocol;
+       struct lwtunnel_state           *rt6i_lwtstate;
 };
 
 static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
index 5fa643b4e8913a35b3a3ead9ad66fa74d368ef70..a37d0432bebda440708516c7f270f931764676dd 100644 (file)
@@ -44,7 +44,9 @@ struct fib_config {
        u32                     fc_flow;
        u32                     fc_nlflags;
        struct nl_info          fc_nlinfo;
- };
+       struct nlattr           *fc_encap;
+       u16                     fc_encap_type;
+};
 
 struct fib_info;
 struct rtable;
@@ -89,6 +91,7 @@ struct fib_nh {
        struct rtable __rcu * __percpu *nh_pcpu_rth_output;
        struct rtable __rcu     *nh_rth_input;
        struct fnhe_hash_bucket __rcu *nh_exceptions;
+       struct lwtunnel_state   *nh_lwtstate;
 };
 
 /*
index d8214cb88bbcfa6524a7d1900c543a45a05f7f31..47984415f5d1e7758d6a1c8b0feaeb55f902a7dd 100644 (file)
@@ -9,9 +9,9 @@
 #include <net/dsfield.h>
 #include <net/gro_cells.h>
 #include <net/inet_ecn.h>
-#include <net/ip.h>
 #include <net/netns/generic.h>
 #include <net/rtnetlink.h>
+#include <net/lwtunnel.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 /* Keep error state on tunnel for 30 sec */
 #define IPTUNNEL_ERR_TIMEO     (30*HZ)
 
+/* Used to memset ip_tunnel padding. */
+#define IP_TUNNEL_KEY_SIZE                                     \
+       (offsetof(struct ip_tunnel_key, tp_dst) +               \
+        FIELD_SIZEOF(struct ip_tunnel_key, tp_dst))
+
+struct ip_tunnel_key {
+       __be64                  tun_id;
+       __be32                  ipv4_src;
+       __be32                  ipv4_dst;
+       __be16                  tun_flags;
+       __u8                    ipv4_tos;
+       __u8                    ipv4_ttl;
+       __be16                  tp_src;
+       __be16                  tp_dst;
+} __packed __aligned(4); /* Minimize padding. */
+
+/* Indicates whether the tunnel info structure represents receive
+ * or transmit tunnel parameters.
+ */
+enum {
+       IP_TUNNEL_INFO_RX,
+       IP_TUNNEL_INFO_TX,
+};
+
+struct ip_tunnel_info {
+       struct ip_tunnel_key    key;
+       const void              *options;
+       u8                      options_len;
+       u8                      mode;
+};
+
 /* 6rd prefix/relay information */
 #ifdef CONFIG_IPV6_SIT_6RD
 struct ip_tunnel_6rd_parm {
@@ -136,6 +167,47 @@ int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *op,
 int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
                            unsigned int num);
 
+static inline void __ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+                                        __be32 saddr, __be32 daddr,
+                                        u8 tos, u8 ttl,
+                                        __be16 tp_src, __be16 tp_dst,
+                                        __be64 tun_id, __be16 tun_flags,
+                                        const void *opts, u8 opts_len)
+{
+       tun_info->key.tun_id = tun_id;
+       tun_info->key.ipv4_src = saddr;
+       tun_info->key.ipv4_dst = daddr;
+       tun_info->key.ipv4_tos = tos;
+       tun_info->key.ipv4_ttl = ttl;
+       tun_info->key.tun_flags = tun_flags;
+
+       /* For tunnel types on top of IPsec, the tp_src and tp_dst of
+        * the upper tunnel are used.
+        * E.g.: for GRE over IPsec, tp_src and tp_dst are zero.
+        */
+       tun_info->key.tp_src = tp_src;
+       tun_info->key.tp_dst = tp_dst;
+
+       /* Clear struct padding. */
+       if (sizeof(tun_info->key) != IP_TUNNEL_KEY_SIZE)
+               memset((unsigned char *)&tun_info->key + IP_TUNNEL_KEY_SIZE,
+                      0, sizeof(tun_info->key) - IP_TUNNEL_KEY_SIZE);
+
+       tun_info->options = opts;
+       tun_info->options_len = opts_len;
+}
+
+static inline void ip_tunnel_info_init(struct ip_tunnel_info *tun_info,
+                                      const struct iphdr *iph,
+                                      __be16 tp_src, __be16 tp_dst,
+                                      __be64 tun_id, __be16 tun_flags,
+                                      const void *opts, u8 opts_len)
+{
+       __ip_tunnel_info_init(tun_info, iph->saddr, iph->daddr,
+                             iph->tos, iph->ttl, tp_src, tp_dst,
+                             tun_id, tun_flags, opts, opts_len);
+}
+
 #ifdef CONFIG_INET
 
 int ip_tunnel_init(struct net_device *dev);
@@ -221,6 +293,44 @@ static inline void iptunnel_xmit_stats(int err,
        }
 }
 
+static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info, size_t n)
+{
+       return info + 1;
+}
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+       return (struct ip_tunnel_info *)lwtstate->data;
+}
+
+extern struct static_key ip_tunnel_metadata_cnt;
+
+/* Returns > 0 if metadata should be collected */
+static inline int ip_tunnel_collect_metadata(void)
+{
+       return static_key_false(&ip_tunnel_metadata_cnt);
+}
+
+void __init ip_tunnel_core_init(void);
+
+void ip_tunnel_need_metadata(void);
+void ip_tunnel_unneed_metadata(void);
+
+#else /* CONFIG_INET */
+
+static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
+{
+       return NULL;
+}
+
+static inline void ip_tunnel_need_metadata(void)
+{
+}
+
+static inline void ip_tunnel_unneed_metadata(void)
+{
+}
+
 #endif /* CONFIG_INET */
 
 #endif /* __NET_IP_TUNNELS_H */
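
A hedged usage sketch (not part of the patch) of the new init helper above, filling receive-side tunnel metadata from an outer IPv4 header; iph, src_port, dst_port and vni are assumed to come from the caller's decap path:

static void example_fill_rx_tun_info(struct ip_tunnel_info *info,
				     const struct iphdr *iph,
				     __be16 src_port, __be16 dst_port,
				     __be64 vni)
{
	/* TUNNEL_KEY marks the tunnel id as valid; no options here. */
	ip_tunnel_info_init(info, iph, src_port, dst_port, vni,
			    TUNNEL_KEY, NULL, 0);
	info->mode = IP_TUNNEL_INFO_RX;
}
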
index 82dbdb092a5d1c43d088fea8055c1bcafee156c5..711cca428cc8cd56b40de704265ad73262a6d2a4 100644 (file)
@@ -707,54 +707,69 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline void ip6_set_txhash(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct ipv6_pinfo *np = inet6_sk(sk);
-       struct flow_keys keys;
 
-       memset(&keys, 0, sizeof(keys));
+/* Sysctl settings for net.ipv6.auto_flowlabels */
+#define IP6_AUTO_FLOW_LABEL_OFF                0
+#define IP6_AUTO_FLOW_LABEL_OPTOUT     1
+#define IP6_AUTO_FLOW_LABEL_OPTIN      2
+#define IP6_AUTO_FLOW_LABEL_FORCED     3
 
-       memcpy(&keys.addrs.v6addrs.src, &np->saddr,
-              sizeof(keys.addrs.v6addrs.src));
-       memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr,
-              sizeof(keys.addrs.v6addrs.dst));
-       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
-       keys.ports.src = inet->inet_sport;
-       keys.ports.dst = inet->inet_dport;
+#define IP6_AUTO_FLOW_LABEL_MAX                IP6_AUTO_FLOW_LABEL_FORCED
 
-       sk->sk_txhash = flow_hash_from_keys(&keys);
-}
+#define IP6_DEFAULT_AUTO_FLOW_LABELS   IP6_AUTO_FLOW_LABEL_OPTOUT
 
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-                                       __be32 flowlabel, bool autolabel)
+                                       __be32 flowlabel, bool autolabel,
+                                       struct flowi6 *fl6)
 {
-       if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
-               u32 hash;
+       u32 hash;
+
+       if (flowlabel ||
+           net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
+           (!autolabel &&
+            net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
+               return flowlabel;
 
-               hash = skb_get_hash(skb);
+       hash = skb_get_hash_flowi6(skb, fl6);
 
-               /* Since this is being sent on the wire obfuscate hash a bit
-                * to minimize possbility that any useful information to an
-                * attacker is leaked. Only lower 20 bits are relevant.
-                */
-               hash ^= hash >> 12;
+       /* Since this is being sent on the wire, obfuscate the hash a bit
+        * to minimize the possibility that any useful information is
+        * leaked to an attacker. Only the lower 20 bits are relevant.
+        */
+       hash = rol32(hash, 16);
 
-               flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
+       flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
-               if (net->ipv6.sysctl.flowlabel_state_ranges)
-                       flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
-       }
+       if (net->ipv6.sysctl.flowlabel_state_ranges)
+               flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;
 
        return flowlabel;
 }
+
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+       switch (net->ipv6.sysctl.auto_flowlabels) {
+       case IP6_AUTO_FLOW_LABEL_OFF:
+       case IP6_AUTO_FLOW_LABEL_OPTIN:
+       default:
+               return 0;
+       case IP6_AUTO_FLOW_LABEL_OPTOUT:
+       case IP6_AUTO_FLOW_LABEL_FORCED:
+               return 1;
+       }
+}
 #else
 static inline void ip6_set_txhash(struct sock *sk) { }
 static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
-                                       __be32 flowlabel, bool autolabel)
+                                       __be32 flowlabel, bool autolabel,
+                                       struct flowi6 *fl6)
 {
        return flowlabel;
 }
+static inline int ip6_default_np_autolabel(struct net *net)
+{
+       return 0;
+}
 #endif
 
 
@@ -832,7 +847,8 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
                              &inet6_sk(sk)->cork);
 }
 
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6);
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+                  struct flowi6 *fl6);
 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst);
 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
new file mode 100644 (file)
index 0000000..33bd309
--- /dev/null
@@ -0,0 +1,147 @@
+#ifndef __NET_LWTUNNEL_H
+#define __NET_LWTUNNEL_H 1
+
+#include <linux/lwtunnel.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/route.h>
+
+#define LWTUNNEL_HASH_BITS   7
+#define LWTUNNEL_HASH_SIZE   (1 << LWTUNNEL_HASH_BITS)
+
+/* lw tunnel state flags */
+#define LWTUNNEL_STATE_OUTPUT_REDIRECT 0x1
+
+struct lwtunnel_state {
+       __u16           type;
+       __u16           flags;
+       atomic_t        refcnt;
+       int             len;
+       __u8            data[0];
+};
+
+struct lwtunnel_encap_ops {
+       int (*build_state)(struct net_device *dev, struct nlattr *encap,
+                          struct lwtunnel_state **ts);
+       int (*output)(struct sock *sk, struct sk_buff *skb);
+       int (*fill_encap)(struct sk_buff *skb,
+                         struct lwtunnel_state *lwtstate);
+       int (*get_encap_size)(struct lwtunnel_state *lwtstate);
+       int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
+};
+
+#ifdef CONFIG_LWTUNNEL
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+       if (lws)
+               atomic_inc(&lws->refcnt);
+
+       return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+       if (!lws)
+               return;
+
+       if (atomic_dec_and_test(&lws->refcnt))
+               kfree(lws);
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+       if (lwtstate && (lwtstate->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT))
+               return true;
+
+       return false;
+}
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+                          unsigned int num);
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+                          unsigned int num);
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                        struct nlattr *encap,
+                        struct lwtunnel_state **lws);
+int lwtunnel_fill_encap(struct sk_buff *skb,
+                       struct lwtunnel_state *lwtstate);
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
+struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb);
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb);
+
+#else
+
+static inline struct lwtunnel_state *
+lwtstate_get(struct lwtunnel_state *lws)
+{
+       return lws;
+}
+
+static inline void lwtstate_put(struct lwtunnel_state *lws)
+{
+}
+
+static inline bool lwtunnel_output_redirect(struct lwtunnel_state *lwtstate)
+{
+       return false;
+}
+
+static inline int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
+                                        unsigned int num)
+{
+       return -EOPNOTSUPP;
+
+}
+
+static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
+                                        unsigned int num)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                                      struct nlattr *encap,
+                                      struct lwtunnel_state **lws)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_fill_encap(struct sk_buff *skb,
+                                     struct lwtunnel_state *lwtstate)
+{
+       return 0;
+}
+
+static inline int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+       return 0;
+}
+
+static inline struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len)
+{
+       return NULL;
+}
+
+static inline int lwtunnel_cmp_encap(struct lwtunnel_state *a,
+                                    struct lwtunnel_state *b)
+{
+       return 0;
+}
+
+static inline int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
+
+#endif /* __NET_LWTUNNEL_H */
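
A minimal sketch (not part of this patch) of how an encapsulation module might
plug into this interface; the ops table, state layout and the choice of
LWTUNNEL_ENCAP_IP below are illustrative assumptions:

        static int example_build_state(struct net_device *dev, struct nlattr *encap,
                                       struct lwtunnel_state **ts)
        {
                /* Reserve 8 bytes of private data behind ->data[]. */
                struct lwtunnel_state *newts = lwtunnel_state_alloc(8);

                if (!newts)
                        return -ENOMEM;

                newts->type = LWTUNNEL_ENCAP_IP;
                newts->flags = LWTUNNEL_STATE_OUTPUT_REDIRECT;
                *ts = newts;
                return 0;
        }

        static int example_output(struct sock *sk, struct sk_buff *skb)
        {
                /* A real module would push its encap header here and hand the
                 * packet to the underlying dst output; this sketch just drops.
                 */
                kfree_skb(skb);
                return -EOPNOTSUPP;
        }

        static const struct lwtunnel_encap_ops example_encap_ops = {
                .build_state = example_build_state,
                .output      = example_output,
        };

        /* Registered once at init time:
         *      lwtunnel_encap_add_ops(&example_encap_ops, LWTUNNEL_ENCAP_IP);
         */
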
index f534a46911dc3967e78700f30f8e7e47ca9acec8..b7f99615224bd05d7e4cb926aca27f14f44aab81 100644 (file)
@@ -320,23 +320,6 @@ int ieee802154_register_hw(struct ieee802154_hw *hw);
  */
 void ieee802154_unregister_hw(struct ieee802154_hw *hw);
 
-/**
- * ieee802154_rx - receive frame
- *
- * Use this function to hand received frames to mac802154. The receive
- * buffer in @skb must start with an IEEE 802.15.4 header. In case of a
- * paged @skb is used, the driver is recommended to put the ieee802154
- * header of the frame on the linear part of the @skb to avoid memory
- * allocation and/or memcpy by the stack.
- *
- * This function may not be called in IRQ context. Calls to this function
- * for a single hardware must be synchronized against each other.
- *
- * @hw: the hardware this frame came in on
- * @skb: the buffer to receive, owned by mac802154 after this call
- */
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
-
 /**
  * ieee802154_rx_irqsafe - receive frame
  *
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..4757997
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2015 Cumulus Networks, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _NET_MPLS_IPTUNNEL_H
+#define _NET_MPLS_IPTUNNEL_H 1
+
+#define MAX_NEW_LABELS 2
+
+struct mpls_iptunnel_encap {
+       u32     label[MAX_NEW_LABELS];
+       u32     labels;
+};
+
+static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
+{
+       return (struct mpls_iptunnel_encap *)lwtstate->data;
+}
+
+#endif
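
For illustration only (not part of this patch), code holding a route whose
lightweight tunnel state was built by the MPLS encap module could recover the
label stack roughly like this; the function name is an assumption:

        static u32 example_mpls_label_count(const struct rtable *rt)
        {
                struct mpls_iptunnel_encap *en;

                if (!rt->rt_lwtstate)
                        return 0;

                /* ->labels counts the entries used in ->label[]. */
                en = mpls_lwtunnel_encap(rt->rt_lwtstate);
                return en->labels;
        }
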
index 8d93544a2d2b5f21c7ed8b8137394df0758dbef3..c0368db6df54d78a1122c45aff412bcafa84b412 100644 (file)
@@ -31,6 +31,7 @@ struct netns_sysctl_ipv6 {
        int auto_flowlabels;
        int icmpv6_time;
        int anycast_src_echo_reply;
+       int ip_nonlocal_bind;
        int fwmark_reflect;
        int idgen_retries;
        int idgen_delay;
index fe22d03afb6a218b6b2dfa7d5329632b8d4936ae..2d45f419477fedadeef42818e7159c148dfd6b84 100644 (file)
@@ -66,6 +66,7 @@ struct rtable {
 
        struct list_head        rt_uncached;
        struct uncached_list    *rt_uncached_list;
+       struct lwtunnel_state   *rt_lwtstate;
 };
 
 static inline bool rt_is_input_route(const struct rtable *rt)
index 343d922d15c2ce0ce1a53dd55d7c006a42e15c99..18fdb98185ab36ae750a2d77078ea16ac87dd0e6 100644 (file)
@@ -141,6 +141,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
                                    unsigned char name_assign_type,
                                    const struct rtnl_link_ops *ops,
                                    struct nlattr *tb[]);
+int rtnl_delete_link(struct net_device *dev);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
 
 int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
index 2738f6f8790836b1b88d5163e5ba297b0f4421c0..2eab08c38e3283efd696bfff4198a16ad27c1d16 100644 (file)
@@ -513,17 +513,20 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
 
-static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
-                                          const struct sk_buff *skb)
+static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
+                                    const struct sk_buff *skb)
 {
-       struct gnet_stats_basic_cpu *bstats =
-                               this_cpu_ptr(sch->cpu_bstats);
-
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
 }
 
+static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
+                                          const struct sk_buff *skb)
+{
+       bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
 {
@@ -547,16 +550,24 @@ static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
        sch->qstats.drops += count;
 }
 
-static inline void qdisc_qstats_drop(struct Qdisc *sch)
+static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
 {
-       sch->qstats.drops++;
+       qstats->drops++;
 }
 
-static inline void qdisc_qstats_drop_cpu(struct Qdisc *sch)
+static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
 {
-       struct gnet_stats_queue *qstats = this_cpu_ptr(sch->cpu_qstats);
+       qstats->overlimits++;
+}
 
-       qstats->drops++;
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+       qstats_drop_inc(&sch->qstats);
+}
+
+static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
+{
+       qstats_drop_inc(this_cpu_ptr(sch->cpu_qstats));
 }
 
 static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
index f21f0708ec59ab6fa186657aaa15380590b061f2..43c6abcf06abc0a5bf0d56bb9235e6800a1fc111 100644 (file)
@@ -429,7 +429,9 @@ struct sock {
        void                    *sk_security;
 #endif
        __u32                   sk_mark;
+#ifdef CONFIG_CGROUP_NET_CLASSID
        u32                     sk_classid;
+#endif
        struct cg_proto         *sk_cgrp;
        void                    (*sk_state_change)(struct sock *sk);
        void                    (*sk_data_ready)(struct sock *sk);
@@ -1685,6 +1687,20 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
 kuid_t sock_i_uid(struct sock *sk);
 unsigned long sock_i_ino(struct sock *sk);
 
+static inline void sk_set_txhash(struct sock *sk)
+{
+       sk->sk_txhash = prandom_u32();
+
+       if (unlikely(!sk->sk_txhash))
+               sk->sk_txhash = 1;
+}
+
+static inline void sk_rethink_txhash(struct sock *sk)
+{
+       if (sk->sk_txhash)
+               sk_set_txhash(sk);
+}
+
 static inline struct dst_entry *
 __sk_dst_get(struct sock *sk)
 {
@@ -1709,6 +1725,8 @@ static inline void dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
+       sk_rethink_txhash(sk);
+
        if (dst && dst->ops->negative_advice) {
                ndst = dst->ops->negative_advice(dst);
 
index d5671f118bfc54566a34d48708e35b69af19b417..89da8934519bb720c4d5f1f479040a889e6079fd 100644 (file)
@@ -157,6 +157,9 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
 int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev,
                            struct net_device *filter_dev, int idx);
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                struct net_device *group_dev,
+                                bool joining);
 
 #else
 
@@ -271,6 +274,12 @@ static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
        return -EOPNOTSUPP;
 }
 
+static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                              struct net_device *group_dev,
+                                              bool joining)
+{
+}
+
 #endif
 
 #endif /* _LINUX_SWITCHDEV_H_ */
index 9fc9b578908ab868dcc0ef5986358ccb603bf5f9..592a6bc02b0b535087e9f2afee844b5ea37c666b 100644 (file)
@@ -6,9 +6,10 @@
 struct tcf_gact {
        struct tcf_common       common;
 #ifdef CONFIG_GACT_PROB
-        u16                    tcfg_ptype;
-        u16                    tcfg_pval;
-        int                    tcfg_paction;
+       u16                     tcfg_ptype;
+       u16                     tcfg_pval;
+       int                     tcfg_paction;
+       atomic_t                packets;
 #endif
 };
 #define to_gact(a) \
index 4dd77a1c106b246b0abc9d8af3d6dc67fa748b5c..dae96bae1c19c2d71fa7c0ea65e74d064e3b3757 100644 (file)
@@ -8,7 +8,7 @@ struct tcf_mirred {
        int                     tcfm_eaction;
        int                     tcfm_ifindex;
        int                     tcfm_ok_push;
-       struct net_device       *tcfm_dev;
+       struct net_device __rcu *tcfm_dev;
        struct list_head        tcfm_list;
 };
 #define to_mirred(a) \
index 950cfecaad3c0d01c646c4fd111eca8d0cf8aef3..364426a2be5a0f7f0a2e6daaf6ce9b9a2f3e3304 100644 (file)
@@ -989,6 +989,11 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH  0x7fffffff
 
+static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
+{
+       return tp->snd_cwnd < tp->snd_ssthresh;
+}
+
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 {
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
@@ -1065,7 +1070,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
        const struct tcp_sock *tp = tcp_sk(sk);
 
        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;
 
        return tp->is_cwnd_limited;
index 68f0ecad6c6e211e8f6dac90214fff93c539eb0f..1a47946f95ba46a9ad9c2de262c9638f6a6776f4 100644 (file)
@@ -33,9 +33,6 @@ static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 
 static inline void twsk_destructor(struct sock *sk)
 {
-       BUG_ON(sk == NULL);
-       BUG_ON(sk->sk_prot == NULL);
-       BUG_ON(sk->sk_prot->twsk_prot == NULL);
        if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
                sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
index 0082b5d33d7d3f2ea66fe94c26b8c3572188affc..eb8d721cdb676af4a5842e810fdd447f0536691d 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
+#include <net/dst_metadata.h>
 
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
@@ -94,20 +95,18 @@ struct vxlanhdr {
 #define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
 #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
 
+#define VNI_HASH_BITS  10
+#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+
 struct vxlan_metadata {
-       __be32          vni;
        u32             gbp;
 };
 
-struct vxlan_sock;
-typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb,
-                          struct vxlan_metadata *md);
-
 /* per UDP socket information */
 struct vxlan_sock {
        struct hlist_node hlist;
-       vxlan_rcv_t      *rcv;
-       void             *data;
        struct work_struct del_work;
        struct socket    *sock;
        struct rcu_head   rcu;
@@ -117,6 +116,57 @@ struct vxlan_sock {
        u32               flags;
 };
 
+union vxlan_addr {
+       struct sockaddr_in sin;
+       struct sockaddr_in6 sin6;
+       struct sockaddr sa;
+};
+
+struct vxlan_rdst {
+       union vxlan_addr         remote_ip;
+       __be16                   remote_port;
+       u32                      remote_vni;
+       u32                      remote_ifindex;
+       struct list_head         list;
+       struct rcu_head          rcu;
+};
+
+struct vxlan_config {
+       union vxlan_addr        remote_ip;
+       union vxlan_addr        saddr;
+       u32                     vni;
+       int                     remote_ifindex;
+       int                     mtu;
+       __be16                  dst_port;
+       __u16                   port_min;
+       __u16                   port_max;
+       __u8                    tos;
+       __u8                    ttl;
+       u32                     flags;
+       unsigned long           age_interval;
+       unsigned int            addrmax;
+       bool                    no_share;
+};
+
+/* Pseudo network device */
+struct vxlan_dev {
+       struct hlist_node hlist;        /* vni hash table */
+       struct list_head  next;         /* vxlan's per namespace list */
+       struct vxlan_sock *vn_sock;     /* listening socket */
+       struct net_device *dev;
+       struct net        *net;         /* netns for packet i/o */
+       struct vxlan_rdst default_dst;  /* default destination */
+       u32               flags;        /* VXLAN_F_* in vxlan.h */
+
+       struct timer_list age_timer;
+       spinlock_t        hash_lock;
+       unsigned int      addrcnt;
+
+       struct vxlan_config     cfg;
+
+       struct hlist_head fdb_head[FDB_HASH_SIZE];
+};
+
 #define VXLAN_F_LEARN                  0x01
 #define VXLAN_F_PROXY                  0x02
 #define VXLAN_F_RSC                    0x04
@@ -130,6 +180,8 @@ struct vxlan_sock {
 #define VXLAN_F_REMCSUM_RX             0x400
 #define VXLAN_F_GBP                    0x800
 #define VXLAN_F_REMCSUM_NOPARTIAL      0x1000
+#define VXLAN_F_COLLECT_METADATA       0x2000
+#define VXLAN_F_FLOW_BASED             0x4000
 
 /* Flags that are used in the receive path. These flags must match in
  * order for a socket to be shareable
@@ -137,18 +189,17 @@ struct vxlan_sock {
 #define VXLAN_F_RCV_FLAGS              (VXLAN_F_GBP |                  \
                                         VXLAN_F_UDP_ZERO_CSUM6_RX |    \
                                         VXLAN_F_REMCSUM_RX |           \
-                                        VXLAN_F_REMCSUM_NOPARTIAL)
-
-struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
-                                 vxlan_rcv_t *rcv, void *data,
-                                 bool no_share, u32 flags);
+                                        VXLAN_F_REMCSUM_NOPARTIAL |    \
+                                        VXLAN_F_COLLECT_METADATA |     \
+                                        VXLAN_F_FLOW_BASED)
 
-void vxlan_sock_release(struct vxlan_sock *vs);
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+                                   u8 name_assign_type, struct vxlan_config *conf);
 
-int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                  __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                  __be16 src_port, __be16 dst_port, struct vxlan_metadata *md,
-                  bool xnet, u32 vxflags);
+static inline __be16 vxlan_dev_dst_port(struct vxlan_dev *vxlan)
+{
+       return inet_sk(vxlan->vn_sock->sock->sk)->inet_sport;
+}
 
 static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
                                                     netdev_features_t features)
index 1ff9942718fee60ba3ef4dede12499e2d49a6fd9..aafb9937b162b47ce047e6becb180b7cd6d3c447 100644 (file)
@@ -243,6 +243,7 @@ header-y += limits.h
 header-y += llc.h
 header-y += loop.h
 header-y += lp.h
+header-y += lwtunnel.h
 header-y += magic.h
 header-y += major.h
 header-y += map_to_7segment.h
index 29ef6f99e43d1d46586fb308c15b0c65bfc33e17..bc0d27d3fbddda0d462ad701e1788f45b1f48f8c 100644 (file)
@@ -249,6 +249,27 @@ enum bpf_func_id {
         * Return: 0 on success
         */
        BPF_FUNC_get_current_comm,
+
+       /**
+        * bpf_get_cgroup_classid(skb) - retrieve a process's classid
+        * @skb: pointer to skb
+        * Return: the net_cls classid if set, 0 otherwise
+        */
+       BPF_FUNC_get_cgroup_classid,
+       BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+       BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
+
+       /**
+        * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+        * retrieve or populate tunnel metadata
+        * @skb: pointer to skb
+        * @key: pointer to 'struct bpf_tunnel_key'
+        * @size: size of 'struct bpf_tunnel_key'
+        * @flags: room for future extensions
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_get_tunnel_key,
+       BPF_FUNC_skb_set_tunnel_key,
        __BPF_FUNC_MAX_ID,
 };
 
@@ -271,4 +292,9 @@ struct __sk_buff {
        __u32 cb[5];
 };
 
+struct bpf_tunnel_key {
+       __u32 tunnel_id;
+       __u32 remote_ipv4;
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
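
A minimal sketch of an eBPF classifier using the new tunnel-key helpers; the
helper-pointer declarations follow the samples/bpf convention, and everything
beyond the helper IDs and the struct above is an assumption, not part of this
patch:

        #include <linux/bpf.h>

        static int (*bpf_skb_get_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
                                             int size, int flags) =
                (void *) BPF_FUNC_skb_get_tunnel_key;
        static int (*bpf_skb_set_tunnel_key)(void *ctx, struct bpf_tunnel_key *key,
                                             int size, int flags) =
                (void *) BPF_FUNC_skb_set_tunnel_key;

        /* Read the collected tunnel metadata, then rewrite the tunnel id. */
        __attribute__((section("classifier"), used))
        int example_cls(struct __sk_buff *skb)
        {
                struct bpf_tunnel_key key = {};

                if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
                        return 0;

                key.tunnel_id = 42;
                bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
                return 0;
        }
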
index cd67aec187d9fab31aacba01974b8f2d145b393e..cd1629170103ef77a580a3694472e4a1452613fe 100644 (file)
@@ -1093,6 +1093,11 @@ struct ethtool_sfeatures {
  * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
  * respectively.  For example, if the device supports HWTSTAMP_TX_ON,
  * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
+ *
+ * Drivers should only report the filters they actually support without
+ * upscaling in the SIOCSHWTSTAMP ioctl. If a SIOCSHWTSTAMP request for
+ * HWTSTAMP_FILTER_V1_SYNC can only be satisfied by upscaling to
+ * HWTSTAMP_FILTER_V1_EVENT, then the driver should report only
+ * HWTSTAMP_FILTER_V1_EVENT in this op.
  */
 struct ethtool_ts_info {
        __u32   cmd;
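
For illustration (not from this patch), a driver whose hardware can only
timestamp all PTP v1 events would report the broader filter rather than the
narrower ones it upscales; only the constant names are real, the rest is an
assumed sketch:

        static int example_get_ts_info(struct net_device *dev,
                                       struct ethtool_ts_info *info)
        {
                info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                        SOF_TIMESTAMPING_RX_HARDWARE |
                                        SOF_TIMESTAMPING_RAW_HARDWARE;
                info->phc_index = -1;
                info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
                /* V1 SYNC requests are honoured by upscaling to V1 EVENT,
                 * so only the EVENT filter is advertised here.
                 */
                info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
                                   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
                return 0;
        }
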
index 2b82d7e30974f93b9a93afc2fdf5c9137492043d..96161b8202b5d026ed39904f15a899659fc39adb 100644 (file)
@@ -43,7 +43,7 @@ enum {
        FRA_UNUSED5,
        FRA_FWMARK,     /* mark */
        FRA_FLOW,       /* flow/class id */
-       FRA_UNUSED6,
+       FRA_TUN_ID,
        FRA_SUPPRESS_IFGROUP,
        FRA_SUPPRESS_PREFIXLEN,
        FRA_TABLE,      /* Extended table id */
index eaaea6208b424e7ef4fd361646b07fc497180a12..3635b77975085a5801d9d4a5555beaf70a623441 100644 (file)
@@ -182,6 +182,7 @@ struct br_mdb_entry {
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
        __u8 state;
+       __u16 vid;
        struct {
                union {
                        __be32  ip4;
index 2c7e8e3d3981e7a70154f0239cfec64946c93623..ea047480a1f0ddca2025fd12db262b7bb359f9c6 100644 (file)
@@ -148,6 +148,7 @@ enum {
        IFLA_PHYS_SWITCH_ID,
        IFLA_LINK_NETNSID,
        IFLA_PHYS_PORT_NAME,
+       IFLA_PROTO_DOWN,
        __IFLA_MAX
 };
 
@@ -381,6 +382,8 @@ enum {
        IFLA_VXLAN_REMCSUM_RX,
        IFLA_VXLAN_GBP,
        IFLA_VXLAN_REMCSUM_NOPARTIAL,
+       IFLA_VXLAN_FLOWBASED,
+       IFLA_VXLAN_COLLECT_METADATA,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -431,6 +434,7 @@ enum {
        IFLA_BOND_AD_ACTOR_SYS_PRIO,
        IFLA_BOND_AD_USER_PORT_KEY,
        IFLA_BOND_AD_ACTOR_SYSTEM,
+       IFLA_BOND_TLB_DYNAMIC_LB,
        __IFLA_BOND_MAX,
 };
 
index 5efa54ae567ca933a15dc1210b5af2cb569951c6..80f3b74446a1a3e56704550138e816014537e60f 100644 (file)
@@ -171,6 +171,8 @@ enum {
        DEVCONF_USE_OPTIMISTIC,
        DEVCONF_ACCEPT_RA_MTU,
        DEVCONF_STABLE_SECRET,
+       DEVCONF_USE_OIF_ADDRS_ONLY,
+       DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
        DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
new file mode 100644 (file)
index 0000000..31377bb
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _UAPI_LWTUNNEL_H_
+#define _UAPI_LWTUNNEL_H_
+
+#include <linux/types.h>
+
+enum lwtunnel_encap_types {
+       LWTUNNEL_ENCAP_NONE,
+       LWTUNNEL_ENCAP_MPLS,
+       LWTUNNEL_ENCAP_IP,
+       __LWTUNNEL_ENCAP_MAX,
+};
+
+#define LWTUNNEL_ENCAP_MAX (__LWTUNNEL_ENCAP_MAX - 1)
+
+
+#endif /* _UAPI_LWTUNNEL_H_ */
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
new file mode 100644 (file)
index 0000000..d80a049
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ *     mpls tunnel api
+ *
+ *     Authors:
+ *             Roopa Prabhu <roopa@cumulusnetworks.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_MPLS_IPTUNNEL_H
+#define _UAPI_LINUX_MPLS_IPTUNNEL_H
+
+/* MPLS tunnel attributes
+ * [RTA_ENCAP] = {
+ *     [MPLS_IPTUNNEL_DST]
+ * }
+ */
+enum {
+       MPLS_IPTUNNEL_UNSPEC,
+       MPLS_IPTUNNEL_DST,
+       __MPLS_IPTUNNEL_MAX,
+};
+#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
+
+#endif /* _UAPI_LINUX_MPLS_IPTUNNEL_H */
index 1dab77601c217c19bb7b0432fca4dd5371b0111d..d6b8854601872063bec49ffca7b5f0a0aa7a38be 100644 (file)
@@ -321,7 +321,7 @@ enum ovs_key_attr {
                                 * the accepted length of the array. */
 
 #ifdef __KERNEL__
-       OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ovs_tunnel_info */
+       OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
 #endif
        __OVS_KEY_ATTR_MAX
 };
index fdd8f07f1d34bd419dbd2c3f62eb09a659842cb4..47d24cb3fbc1f8017f715dff06ab5aa5f977f6b0 100644 (file)
@@ -286,6 +286,21 @@ enum rt_class_t {
 
 /* Routing message attributes */
 
+enum ip_tunnel_t {
+       IP_TUN_UNSPEC,
+       IP_TUN_ID,
+       IP_TUN_DST,
+       IP_TUN_SRC,
+       IP_TUN_TTL,
+       IP_TUN_TOS,
+       IP_TUN_SPORT,
+       IP_TUN_DPORT,
+       IP_TUN_FLAGS,
+       __IP_TUN_MAX,
+};
+
+#define IP_TUN_MAX (__IP_TUN_MAX - 1)
+
 enum rtattr_type_t {
        RTA_UNSPEC,
        RTA_DST,
@@ -308,6 +323,8 @@ enum rtattr_type_t {
        RTA_VIA,
        RTA_NEWDST,
        RTA_PREF,
+       RTA_ENCAP_TYPE,
+       RTA_ENCAP,
        __RTA_MAX
 };
 
index eee8968407f063b5d9c4776a30ebe45e5b782f2d..25a9ad8bcef1240915f2553a8acade447186d869 100644 (file)
@@ -278,6 +278,8 @@ enum
        LINUX_MIB_TCPACKSKIPPEDCHALLENGE,       /* TCPACKSkippedChallenge */
        LINUX_MIB_TCPWINPROBE,                  /* TCPWinProbe */
        LINUX_MIB_TCPKEEPALIVE,                 /* TCPKeepAlive */
+       LINUX_MIB_TCPMTUPFAIL,                  /* TCPMTUPFail */
+       LINUX_MIB_TCPMTUPSUCCESS,               /* TCPMTUPSuccess */
        __LINUX_MIB_MAX
 };
 
index c5bedc82bc1c540bf466c4d2e64f8663a974536d..fafa741614453c46ec9eb90d340221dcf9c3cc85 100644 (file)
@@ -177,6 +177,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
        return 0;
 }
+EXPORT_SYMBOL_GPL(__bpf_call_base);
 
 /**
  *     __bpf_prog_run - run eBPF program on a given context
@@ -453,7 +454,11 @@ select_insn:
                if (unlikely(!prog))
                        goto out;
 
-               ARG1 = BPF_R1;
+               /* ARG1 at this point is guaranteed to point to CTX from
+                * the verifier side because the tail call is handled
+                * like a helper, that is, bpf_tail_call_proto,
+                * where arg1_type is ARG_PTR_TO_CTX.
+                */
                insn = prog->insnsi;
                goto select_insn;
 out:
index 039d866fd36ab0e1d553166acbf2aa8b86bbab06..cd307df98cb33fca49c2e02704336f0bdd7c736d 100644 (file)
@@ -648,6 +648,9 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
        struct verifier_state *state = &env->cur_state;
        int size, err = 0;
 
+       if (state->regs[regno].type == PTR_TO_STACK)
+               off += state->regs[regno].imm;
+
        size = bpf_size_to_bytes(bpf_size);
        if (size < 0)
                return size;
@@ -667,7 +670,8 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
                if (!err && t == BPF_READ && value_regno >= 0)
                        mark_reg_unknown_value(state->regs, value_regno);
 
-       } else if (state->regs[regno].type == FRAME_PTR) {
+       } else if (state->regs[regno].type == FRAME_PTR ||
+                  state->regs[regno].type == PTR_TO_STACK) {
                if (off >= 0 || off < -MAX_BPF_STACK) {
                        verbose("invalid stack off=%d size=%d\n", off, size);
                        return -EACCES;
index 7f58c735d745049025407806e972bb4c7f124888..3afddf2026c983d279fe822a0c42b7f1a65f193a 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/filter.h>
+#include <linux/bpf.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
@@ -355,6 +356,81 @@ static int bpf_fill_ja(struct bpf_test *self)
        return __bpf_fill_ja(self, 12, 9);
 }
 
+static int bpf_fill_ld_abs_get_processor_id(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct sock_filter *insn;
+       int i;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       for (i = 0; i < len - 1; i += 2) {
+               insn[i] = __BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0);
+               insn[i + 1] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
+                                        SKF_AD_OFF + SKF_AD_CPU);
+       }
+
+       insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xbee);
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
+#define PUSH_CNT 68
+/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */
+static int bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
+{
+       unsigned int len = BPF_MAXINSNS;
+       struct bpf_insn *insn;
+       int i = 0, j, k = 0;
+
+       insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return -ENOMEM;
+
+       insn[i++] = BPF_MOV64_REG(R6, R1);
+loop:
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(R1, R6);
+               insn[i++] = BPF_MOV64_IMM(R2, 1);
+               insn[i++] = BPF_MOV64_IMM(R3, 2);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        bpf_skb_vlan_push_proto.func - __bpf_call_base);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+               i++;
+       }
+
+       for (j = 0; j < PUSH_CNT; j++) {
+               insn[i++] = BPF_LD_ABS(BPF_B, 0);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0x34, len - i - 2);
+               i++;
+               insn[i++] = BPF_MOV64_REG(R1, R6);
+               insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                        bpf_skb_vlan_pop_proto.func - __bpf_call_base);
+               insn[i] = BPF_JMP_IMM(BPF_JNE, R0, 0, len - i - 2);
+               i++;
+       }
+       if (++k < 5)
+               goto loop;
+
+       for (; i < len - 1; i++)
+               insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xbef);
+
+       insn[len - 1] = BPF_EXIT_INSN();
+
+       self->u.ptr.insns = insn;
+       self->u.ptr.len = len;
+
+       return 0;
+}
+
 static struct bpf_test tests[] = {
        {
                "TAX",
@@ -3674,6 +3750,9 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
                        BPF_ENDIAN(BPF_FROM_BE, R0, 32),
+                       BPF_ALU64_REG(BPF_MOV, R1, R0),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 32),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -3708,6 +3787,9 @@ static struct bpf_test tests[] = {
                .u.insns_int = {
                        BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
                        BPF_ENDIAN(BPF_FROM_LE, R0, 32),
+                       BPF_ALU64_REG(BPF_MOV, R1, R0),
+                       BPF_ALU64_IMM(BPF_RSH, R1, 32),
+                       BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
                        BPF_EXIT_INSN(),
                },
                INTERNAL,
@@ -4392,6 +4474,22 @@ static struct bpf_test tests[] = {
                { { 0, 0xababcbac } },
                .fill_helper = bpf_fill_maxinsns11,
        },
+       {
+               "BPF_MAXINSNS: ld_abs+get_processor_id",
+               { },
+               CLASSIC,
+               { },
+               { { 1, 0xbee } },
+               .fill_helper = bpf_fill_ld_abs_get_processor_id,
+       },
+       {
+               "BPF_MAXINSNS: ld_abs+vlan_push/pop",
+               { },
+               INTERNAL,
+               { 0x34 },
+               { { 1, 0xbef } },
+               .fill_helper = bpf_fill_ld_abs_vlan_push_pop,
+       },
 };
 
 static struct net_device dev;
@@ -4515,6 +4613,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
                }
 
                fp->len = flen;
+               /* Type doesn't really matter here as long as it's not unspec. */
+               fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
                memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
 
                bpf_prog_select_runtime(fp);
@@ -4545,14 +4645,14 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
        u64 start, finish;
        int ret = 0, i;
 
-       start = ktime_to_us(ktime_get());
+       start = ktime_get_ns();
 
        for (i = 0; i < runs; i++)
                ret = BPF_PROG_RUN(fp, data);
 
-       finish = ktime_to_us(ktime_get());
+       finish = ktime_get_ns();
 
-       *duration = (finish - start) * 1000ULL;
+       *duration = finish - start;
        do_div(*duration, runs);
 
        return ret;
index c90777eae1f837f84b1b53fd8704b567b9d90835..9af7cefb195d3d3537366396d0af31180292ece9 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define MAX_ENTRIES    1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -87,6 +88,8 @@ static int __init test_rht_lookup(struct rhashtable *ht)
                                return -EINVAL;
                        }
                }
+
+               cond_resched_rcu();
        }
 
        return 0;
@@ -160,6 +163,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
                } else if (err) {
                        return err;
                }
+
+               cond_resched();
        }
 
        if (insert_fails)
@@ -183,6 +188,8 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
 
                        rhashtable_remove_fast(ht, &obj->node, test_rht_params);
                }
+
+               cond_resched();
        }
 
        end = ktime_get_ns();
index 94a375c04f21cd5a9b525f7dffb4a14657deea89..9055d7b9d1129d69e34f8ed255922b2e340fcbb1 100644 (file)
@@ -613,6 +613,8 @@ EXPORT_SYMBOL_GPL(lowpan_header_compress);
 
 static int __init lowpan_module_init(void)
 {
+       request_module_nowait("ipv6");
+
        request_module_nowait("nhc_dest");
        request_module_nowait("nhc_fragment");
        request_module_nowait("nhc_hop");
index 57a7c5af3175d1826f0708c551227e5e4281577e..7021c1bf44d6ce949091cb287232e6cfefed6256 100644 (file)
@@ -374,6 +374,13 @@ source "net/caif/Kconfig"
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
 
+config LWTUNNEL
+       bool "Network light weight tunnels"
+       ---help---
+         This feature provides an infrastructure to support lightweight
+         tunnels such as MPLS. There is no netdevice associated with a
+         lightweight tunnel endpoint. Tunnel encapsulation parameters are
+         stored with the lightweight tunnel state associated with FIB routes.
 
 endif   # if NET
 
index cc78538d163bbf05bd6bcf5aa5c8c4954fe8ac66..aa0047c5c4672947b2b7d153dbd8a394e70ccbdf 100644 (file)
@@ -802,13 +802,10 @@ static int br2684_seq_show(struct seq_file *seq, void *v)
                           (brdev->payload == p_bridged) ? "bridged" : "routed",
                           brvcc->copies_failed, brvcc->copies_needed);
 #ifdef CONFIG_ATM_BR2684_IPFILTER
-#define b1(var, byte)  ((u8 *) &brvcc->filter.var)[byte]
-#define bs(var)                b1(var, 0), b1(var, 1), b1(var, 2), b1(var, 3)
                if (brvcc->filter.netmask != 0)
-                       seq_printf(seq, "    filter=%d.%d.%d.%d/"
-                                  "%d.%d.%d.%d\n", bs(prefix), bs(netmask));
-#undef bs
-#undef b1
+                       seq_printf(seq, "    filter=%pI4/%pI4\n",
+                                  &brvcc->filter.prefix,
+                                  &brvcc->filter.netmask);
 #endif /* CONFIG_ATM_BR2684_IPFILTER */
        }
        return 0;
index 2fb7b306490424c62bafd0fe7de6fd83176ed698..0ffe2e24020aa86b80115221811f324511cc1385 100644 (file)
@@ -859,9 +859,22 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev);
        SET_NETDEV_DEVTYPE(netdev, &bt_type);
 
+       *dev = netdev_priv(netdev);
+       (*dev)->netdev = netdev;
+       (*dev)->hdev = chan->conn->hcon->hdev;
+       INIT_LIST_HEAD(&(*dev)->peers);
+
+       spin_lock(&devices_lock);
+       INIT_LIST_HEAD(&(*dev)->list);
+       list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
+       spin_unlock(&devices_lock);
+
        err = register_netdev(netdev);
        if (err < 0) {
                BT_INFO("register_netdev failed %d", err);
+               spin_lock(&devices_lock);
+               list_del_rcu(&(*dev)->list);
+               spin_unlock(&devices_lock);
                free_netdev(netdev);
                goto out;
        }
@@ -871,16 +884,6 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
               &chan->src, chan->src_type);
        set_bit(__LINK_STATE_PRESENT, &netdev->state);
 
-       *dev = netdev_priv(netdev);
-       (*dev)->netdev = netdev;
-       (*dev)->hdev = chan->conn->hcon->hdev;
-       INIT_LIST_HEAD(&(*dev)->peers);
-
-       spin_lock(&devices_lock);
-       INIT_LIST_HEAD(&(*dev)->list);
-       list_add_rcu(&(*dev)->list, &bt_6lowpan_devices);
-       spin_unlock(&devices_lock);
-
        return 0;
 
 out:
index b8c794b87523857b9a658526ebb92dd21b22dd57..95d1a66ba03aa20095932a1c45f9f76af2cc1393 100644 (file)
@@ -53,6 +53,11 @@ source "net/bluetooth/cmtp/Kconfig"
 
 source "net/bluetooth/hidp/Kconfig"
 
+config BT_HS
+       bool "Bluetooth High Speed (HS) features"
+       depends on BT_BREDR
+       default y
+
 config BT_LE
        bool "Bluetooth Low Energy (LE) features"
        depends on BT
index 29c12ae72a665bf8c730ee5472036fd673dc8f5a..2b15ae8c1def06642682c488a9439f0e57feeb13 100644 (file)
@@ -13,9 +13,10 @@ bluetooth_6lowpan-y := 6lowpan.o
 
 bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
        hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
-       a2mp.o amp.o ecc.o hci_request.o mgmt_util.o
+       ecc.o hci_request.o mgmt_util.o
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
+bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
index 5a04eb1a7e5762c82109255c2aa035bec9a840dc..5f123c3320a7be1f355d31f1023d3e3996853339 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -286,11 +287,21 @@ static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
        return 0;
 }
 
+static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status,
+                                        u16 opcode)
+{
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       a2mp_send_getinfo_rsp(hdev);
+}
+
 static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
                            struct a2mp_cmd *hdr)
 {
        struct a2mp_info_req *req  = (void *) skb->data;
        struct hci_dev *hdev;
+       struct hci_request hreq;
+       int err = 0;
 
        if (le16_to_cpu(hdr->len) < sizeof(*req))
                return -EINVAL;
@@ -311,7 +322,11 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
        }
 
        set_bit(READ_LOC_AMP_INFO, &mgr->state);
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+       hci_req_init(&hreq, hdev);
+       hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
+       err = hci_req_run(&hreq, read_local_amp_info_complete);
+       if (err < 0)
+               a2mp_send_getinfo_rsp(hdev);
 
 done:
        if (hdev)
index 296f665adb09d01c0ffc7fe421bf8a75115bf1ff..a4ff3ea9b38a6e3cfe1a4ea63105458cd07e06a9 100644 (file)
@@ -130,10 +130,29 @@ struct a2mp_physlink_rsp {
 #define A2MP_STATUS_SECURITY_VIOLATION         0x06
 
 struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr);
+
+#if IS_ENABLED(CONFIG_BT_HS)
 int amp_mgr_put(struct amp_mgr *mgr);
 struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
                                       struct sk_buff *skb);
 void a2mp_discover_amp(struct l2cap_chan *chan);
+#else
+static inline int amp_mgr_put(struct amp_mgr *mgr)
+{
+       return 0;
+}
+
+static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+                                                    struct sk_buff *skb)
+{
+       return NULL;
+}
+
+static inline void a2mp_discover_amp(struct l2cap_chan *chan)
+{
+}
+#endif
+
 void a2mp_send_getinfo_rsp(struct hci_dev *hdev);
 void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status);
 void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status);
index ee016f03910005de87cc45a7d04b2a3989d90c2e..238ddd3cf95fb660d41f751821a09550f977f067 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/bluetooth/hci_core.h>
 #include <crypto/hash.h>
 
+#include "hci_request.h"
 #include "a2mp.h"
 #include "amp.h"
 
@@ -220,10 +221,49 @@ int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
        return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data);
 }
 
+static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+                                         u16 opcode, struct sk_buff *skb)
+{
+       struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data;
+       struct amp_assoc *assoc = &hdev->loc_assoc;
+       size_t rem_len, frag_len;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+
+       if (rp->status)
+               goto send_rsp;
+
+       frag_len = skb->len - sizeof(*rp);
+       rem_len = __le16_to_cpu(rp->rem_len);
+
+       if (rem_len > frag_len) {
+               BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
+
+               memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
+               assoc->offset += frag_len;
+
+               /* Read other fragments */
+               amp_read_loc_assoc_frag(hdev, rp->phy_handle);
+
+               return;
+       }
+
+       memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
+       assoc->len = assoc->offset + rem_len;
+       assoc->offset = 0;
+
+send_rsp:
+       /* Send A2MP Rsp when all fragments are received */
+       a2mp_send_getampassoc_rsp(hdev, rp->status);
+       a2mp_send_create_phy_link_req(hdev, rp->status);
+}
+
 void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
 {
        struct hci_cp_read_local_amp_assoc cp;
        struct amp_assoc *loc_assoc = &hdev->loc_assoc;
+       struct hci_request req;
+       int err = 0;
 
        BT_DBG("%s handle %d", hdev->name, phy_handle);
 
@@ -231,12 +271,18 @@ void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle)
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
        cp.len_so_far = cpu_to_le16(loc_assoc->offset);
 
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
 {
        struct hci_cp_read_local_amp_assoc cp;
+       struct hci_request req;
+       int err = 0;
 
        memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc));
        memset(&cp, 0, sizeof(cp));
@@ -244,7 +290,11 @@ void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr)
        cp.max_len = cpu_to_le16(hdev->amp_assoc_size);
 
        set_bit(READ_LOC_AMP_ASSOC, &mgr->state);
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
 }
 
 void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
@@ -252,6 +302,8 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
 {
        struct hci_cp_read_local_amp_assoc cp;
        struct amp_mgr *mgr = hcon->amp_mgr;
+       struct hci_request req;
+       int err = 0;
 
        cp.phy_handle = hcon->handle;
        cp.len_so_far = cpu_to_le16(0);
@@ -260,7 +312,25 @@ void amp_read_loc_assoc_final_data(struct hci_dev *hdev,
        set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state);
 
        /* Read Local AMP Assoc final link information data */
-       hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp);
+       err = hci_req_run_skb(&req, read_local_amp_assoc_complete);
+       if (err < 0)
+               a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID);
+}
+
+static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status,
+                                           u16 opcode, struct sk_buff *skb)
+{
+       struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data;
+
+       BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
+              hdev->name, rp->status, rp->phy_handle);
+
+       if (rp->status)
+               return;
+
+       amp_write_rem_assoc_continue(hdev, rp->phy_handle);
 }
 
 /* Write AMP Assoc data fragments, returns true with last fragment written*/
@@ -270,6 +340,7 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
        struct hci_cp_write_remote_amp_assoc *cp;
        struct amp_mgr *mgr = hcon->amp_mgr;
        struct amp_ctrl *ctrl;
+       struct hci_request req;
        u16 frag_len, len;
 
        ctrl = amp_ctrl_lookup(mgr, hcon->remote_id);
@@ -307,7 +378,9 @@ static bool amp_write_rem_assoc_frag(struct hci_dev *hdev,
 
        amp_ctrl_put(ctrl);
 
-       hci_send_cmd(hdev, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp);
+       hci_req_run_skb(&req, write_remote_amp_assoc_complete);
 
        kfree(cp);
 
@@ -344,10 +417,37 @@ void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle)
        amp_write_rem_assoc_frag(hdev, hcon);
 }
 
+static void create_phylink_complete(struct hci_dev *hdev, u8 status,
+                                   u16 opcode)
+{
+       struct hci_cp_create_phy_link *cp;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
+       if (!cp)
+               return;
+
+       hci_dev_lock(hdev);
+
+       if (status) {
+               struct hci_conn *hcon;
+
+               hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
+               if (hcon)
+                       hci_conn_del(hcon);
+       } else {
+               amp_write_remote_assoc(hdev, cp->phy_handle);
+       }
+
+       hci_dev_unlock(hdev);
+}
+
 void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon)
 {
        struct hci_cp_create_phy_link cp;
+       struct hci_request req;
 
        cp.phy_handle = hcon->handle;
 
@@ -360,13 +460,33 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                return;
        }
 
-       hci_send_cmd(hdev, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp);
+       hci_req_run(&req, create_phylink_complete);
+}
+
+static void accept_phylink_complete(struct hci_dev *hdev, u8 status,
+                                   u16 opcode)
+{
+       struct hci_cp_accept_phy_link *cp;
+
+       BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+       if (status)
+               return;
+
+       cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
+       if (!cp)
+               return;
+
+       amp_write_remote_assoc(hdev, cp->phy_handle);
 }
 
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon)
 {
        struct hci_cp_accept_phy_link cp;
+       struct hci_request req;
 
        cp.phy_handle = hcon->handle;
 
@@ -379,7 +499,9 @@ void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                return;
        }
 
-       hci_send_cmd(hdev, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+       hci_req_init(&req, hdev);
+       hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp);
+       hci_req_run(&req, accept_phylink_complete);
 }
 
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon)
index 7ea3db77ba890a18876fd3a57ddf6aa27fa9269c..8848f8158ae45d61013b5373ce4d882969178c25 100644 (file)
@@ -44,6 +44,20 @@ void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon);
 void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr,
                        struct hci_conn *hcon);
+
+#if IS_ENABLED(CONFIG_BT_HS)
+void amp_create_logical_link(struct l2cap_chan *chan);
+void amp_disconnect_logical_link(struct hci_chan *hchan);
+#else
+static inline void amp_create_logical_link(struct l2cap_chan *chan)
+{
+}
+
+static inline void amp_disconnect_logical_link(struct hci_chan *hchan)
+{
+}
+#endif
+
 void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle);
 void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle);
 void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon);
index b0c6c6af76ef07c311ea940d482b3d45ab83696d..9a50338772f3af9875c83f628e110ffa6458781a 100644 (file)
@@ -100,9 +100,9 @@ static void cmtp_application_del(struct cmtp_session *session, struct cmtp_appli
 static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value)
 {
        struct cmtp_application *app;
-       struct list_head *p, *n;
+       struct list_head *p;
 
-       list_for_each_safe(p, n, &session->applications) {
+       list_for_each(p, &session->applications) {
                app = list_entry(p, struct cmtp_application, list);
                switch (pattern) {
                case CMTP_MSGNUM:
@@ -511,13 +511,13 @@ static int cmtp_proc_show(struct seq_file *m, void *v)
        struct capi_ctr *ctrl = m->private;
        struct cmtp_session *session = ctrl->driverdata;
        struct cmtp_application *app;
-       struct list_head *p, *n;
+       struct list_head *p;
 
        seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl));
        seq_printf(m, "addr %s\n", session->name);
        seq_printf(m, "ctrl %d\n", session->num);
 
-       list_for_each_safe(p, n, &session->applications) {
+       list_for_each(p, &session->applications) {
                app = list_entry(p, struct cmtp_application, list);
                seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping);
        }
index 2f8fb33067e1c48fedf3055ddf29a3be68161c9f..bc43b6490555c7d75dae8ac452a7b012c8f1ffa1 100644 (file)
@@ -2822,10 +2822,6 @@ struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
 {
        struct hci_conn_params *params;
 
-       /* The conn params list only contains identity addresses */
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
@@ -2842,10 +2838,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
 {
        struct hci_conn_params *param;
 
-       /* The list only contains identity addresses */
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        list_for_each_entry(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type)
@@ -2861,9 +2853,6 @@ struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
 {
        struct hci_conn_params *params;
 
-       if (!hci_is_identity_address(addr, addr_type))
-               return NULL;
-
        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
                return params;
index 32363c2b7f83d7b458eb303e2fbe7a8b050539b5..218d7dfc342f484b0b9b18c4208a2ccc5efc0cb8 100644 (file)
@@ -823,7 +823,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
 
        if (rp->status)
-               goto a2mp_rsp;
+               return;
 
        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -835,46 +835,6 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
-
-a2mp_rsp:
-       a2mp_send_getinfo_rsp(hdev);
-}
-
-static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
-                                       struct sk_buff *skb)
-{
-       struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
-       struct amp_assoc *assoc = &hdev->loc_assoc;
-       size_t rem_len, frag_len;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
-
-       if (rp->status)
-               goto a2mp_rsp;
-
-       frag_len = skb->len - sizeof(*rp);
-       rem_len = __le16_to_cpu(rp->rem_len);
-
-       if (rem_len > frag_len) {
-               BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
-
-               memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
-               assoc->offset += frag_len;
-
-               /* Read other fragments */
-               amp_read_loc_assoc_frag(hdev, rp->phy_handle);
-
-               return;
-       }
-
-       memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
-       assoc->len = assoc->offset + rem_len;
-       assoc->offset = 0;
-
-a2mp_rsp:
-       /* Send A2MP Rsp when all fragments are received */
-       a2mp_send_getampassoc_rsp(hdev, rp->status);
-       a2mp_send_create_phy_link_req(hdev, rp->status);
 }
 
 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
@@ -1409,20 +1369,6 @@ static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
-                                         struct sk_buff *skb)
-{
-       struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
-
-       BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
-              hdev->name, rp->status, rp->phy_handle);
-
-       if (rp->status)
-               return;
-
-       amp_write_rem_assoc_continue(hdev, rp->phy_handle);
-}
-
 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
 {
        struct hci_rp_read_rssi *rp = (void *) skb->data;
@@ -1944,47 +1890,6 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
        hci_dev_unlock(hdev);
 }
 
-static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
-{
-       struct hci_cp_create_phy_link *cp;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
-       if (!cp)
-               return;
-
-       hci_dev_lock(hdev);
-
-       if (status) {
-               struct hci_conn *hcon;
-
-               hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
-               if (hcon)
-                       hci_conn_del(hcon);
-       } else {
-               amp_write_remote_assoc(hdev, cp->phy_handle);
-       }
-
-       hci_dev_unlock(hdev);
-}
-
-static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
-{
-       struct hci_cp_accept_phy_link *cp;
-
-       BT_DBG("%s status 0x%2.2x", hdev->name, status);
-
-       if (status)
-               return;
-
-       cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
-       if (!cp)
-               return;
-
-       amp_write_remote_assoc(hdev, cp->phy_handle);
-}
-
 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
 {
        struct hci_cp_le_create_conn *cp;
@@ -2998,10 +2903,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cc_read_clock(hdev, skb);
                break;
 
-       case HCI_OP_READ_LOCAL_AMP_ASSOC:
-               hci_cc_read_local_amp_assoc(hdev, skb);
-               break;
-
        case HCI_OP_READ_INQ_RSP_TX_POWER:
                hci_cc_read_inq_rsp_tx_power(hdev, skb);
                break;
@@ -3106,10 +3007,6 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cc_set_adv_param(hdev, skb);
                break;
 
-       case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
-               hci_cc_write_remote_amp_assoc(hdev, skb);
-               break;
-
        case HCI_OP_READ_RSSI:
                hci_cc_read_rssi(hdev, skb);
                break;
@@ -3193,14 +3090,6 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
                hci_cs_setup_sync_conn(hdev, ev->status);
                break;
 
-       case HCI_OP_CREATE_PHY_LINK:
-               hci_cs_create_phylink(hdev, ev->status);
-               break;
-
-       case HCI_OP_ACCEPT_PHY_LINK:
-               hci_cs_accept_phylink(hdev, ev->status);
-               break;
-
        case HCI_OP_SNIFF_MODE:
                hci_cs_sniff_mode(hdev, ev->status);
                break;
@@ -4399,6 +4288,23 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+#if IS_ENABLED(CONFIG_BT_HS)
+static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+{
+       struct hci_ev_channel_selected *ev = (void *)skb->data;
+       struct hci_conn *hcon;
+
+       BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
+
+       skb_pull(skb, sizeof(*ev));
+
+       hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
+       if (!hcon)
+               return;
+
+       amp_read_loc_assoc_final_data(hdev, hcon);
+}
+
 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
                                      struct sk_buff *skb)
 {
@@ -4522,6 +4428,7 @@ static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
 
        hci_dev_unlock(hdev);
 }
+#endif
 
 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
@@ -5206,22 +5113,6 @@ static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
        }
 }
 
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
-       struct hci_ev_channel_selected *ev = (void *) skb->data;
-       struct hci_conn *hcon;
-
-       BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
-       skb_pull(skb, sizeof(*ev));
-
-       hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
-       if (!hcon)
-               return;
-
-       amp_read_loc_assoc_final_data(hdev, hcon);
-}
-
 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                 u8 event, struct sk_buff *skb)
 {
@@ -5442,14 +5333,15 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
                hci_le_meta_evt(hdev, skb);
                break;
 
-       case HCI_EV_CHANNEL_SELECTED:
-               hci_chan_selected_evt(hdev, skb);
-               break;
-
        case HCI_EV_REMOTE_OOB_DATA_REQUEST:
                hci_remote_oob_data_request_evt(hdev, skb);
                break;
 
+#if IS_ENABLED(CONFIG_BT_HS)
+       case HCI_EV_CHANNEL_SELECTED:
+               hci_chan_selected_evt(hdev, skb);
+               break;
+
        case HCI_EV_PHY_LINK_COMPLETE:
                hci_phy_link_complete_evt(hdev, skb);
                break;
@@ -5465,6 +5357,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
        case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
                hci_disconn_phylink_complete_evt(hdev, skb);
                break;
+#endif
 
        case HCI_EV_NUM_COMP_BLOCKS:
                hci_num_comp_blocks_evt(hdev, skb);
index 244287706f910bdaf69afaef98cf2706cd301654..586b3d580cfcba0422828cab1843363178dfe85c 100644 (file)
@@ -1054,18 +1054,23 @@ static void l2cap_sock_kill(struct sock *sk)
        sock_put(sk);
 }
 
-static int __l2cap_wait_ack(struct sock *sk)
+static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan)
 {
-       struct l2cap_chan *chan = l2cap_pi(sk)->chan;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;
-       int timeo = HZ/5;
+       int timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
+       /* Timeout to prevent infinite loop */
+       unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT;
 
        add_wait_queue(sk_sleep(sk), &wait);
        set_current_state(TASK_INTERRUPTIBLE);
-       while (chan->unacked_frames > 0 && chan->conn) {
+       do {
+               BT_DBG("Waiting for %d ACKs, timeout %04d ms",
+                      chan->unacked_frames, time_after(jiffies, timeout) ? 0 :
+                      jiffies_to_msecs(timeout - jiffies));
+
                if (!timeo)
-                       timeo = HZ/5;
+                       timeo = L2CAP_WAIT_ACK_POLL_PERIOD;
 
                if (signal_pending(current)) {
                        err = sock_intr_errno(timeo);
@@ -1080,7 +1085,15 @@ static int __l2cap_wait_ack(struct sock *sk)
                err = sock_error(sk);
                if (err)
                        break;
-       }
+
+               if (time_after(jiffies, timeout)) {
+                       err = -ENOLINK;
+                       break;
+               }
+
+       } while (chan->unacked_frames > 0 &&
+                chan->state == BT_CONNECTED);
+
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return err;
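
Editorial aside, not part of the patch: the reworked __l2cap_wait_ack() above bounds its poll loop with a hard deadline so a peer that never ACKs cannot wedge shutdown forever, returning -ENOLINK on expiry. A sketch of that bounded-wait pattern in plain C, with clock_gettime() standing in for jiffies/time_after() and nanosleep() for the scheduled wait (illustrative names):

#include <stdio.h>
#include <time.h>
#include <errno.h>

static long long now_ms(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Returns 0 once *unacked reaches zero, -ENOLINK when the deadline passes. */
static int wait_for_acks(const int *unacked, int timeout_ms, int poll_ms)
{
        long long deadline = now_ms() + timeout_ms;
        struct timespec poll = { poll_ms / 1000, (poll_ms % 1000) * 1000000L };

        while (*unacked > 0) {
                if (now_ms() > deadline)
                        return -ENOLINK;        /* same error the patch returns */
                nanosleep(&poll, NULL);         /* stand-in for the scheduled wait */
        }
        return 0;
}

int main(void)
{
        int unacked = 0;                        /* everything already ACKed */

        printf("wait_for_acks: %d\n", wait_for_acks(&unacked, 10000, 200));
        return 0;
}
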
@@ -1098,7 +1111,12 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        if (!sk)
                return 0;
 
+       /* prevent sk structure from being freed whilst unlocked */
+       sock_hold(sk);
+
        chan = l2cap_pi(sk)->chan;
+       /* prevent chan structure from being freed whilst unlocked */
+       l2cap_chan_hold(chan);
        conn = chan->conn;
 
        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
@@ -1110,8 +1128,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        lock_sock(sk);
 
        if (!sk->sk_shutdown) {
-               if (chan->mode == L2CAP_MODE_ERTM)
-                       err = __l2cap_wait_ack(sk);
+               if (chan->mode == L2CAP_MODE_ERTM &&
+                   chan->unacked_frames > 0 &&
+                   chan->state == BT_CONNECTED)
+                       err = __l2cap_wait_ack(sk, chan);
 
                sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -1134,6 +1154,11 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
        if (conn)
                mutex_unlock(&conn->chan_lock);
 
+       l2cap_chan_put(chan);
+       sock_put(sk);
+
+       BT_DBG("err: %d", err);
+
        return err;
 }
 
index 7998fb27916568da087b2734a017355158044a75..7ab191589541c8fab56d47ab9f0e21f050751de9 100644 (file)
@@ -6226,6 +6226,17 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
        else
                auto_conn = HCI_AUTO_CONN_REPORT;
 
+       /* Kernel internally uses conn_params with resolvable private
+        * address, but Add Device allows only identity addresses.
+        * Make sure this restriction is enforced before calling
+        * hci_conn_params_lookup.
+        */
+       if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+               err = cmd->cmd_complete(cmd, MGMT_STATUS_INVALID_PARAMS);
+               mgmt_pending_remove(cmd);
+               goto unlock;
+       }
+
        /* If the connection parameters don't exist for this device,
         * they will be created and configured with defaults.
         */
@@ -6340,6 +6351,18 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
                else
                        addr_type = ADDR_LE_DEV_RANDOM;
 
+               /* Kernel internally uses conn_params with resolvable private
+                * address, but Remove Device allows only identity addresses.
+                * Make sure this restriction is enforced before calling
+                * hci_conn_params_lookup.
+                */
+               if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+                       err = cmd->cmd_complete(cmd,
+                                               MGMT_STATUS_INVALID_PARAMS);
+                       mgmt_pending_remove(cmd);
+                       goto unlock;
+               }
+
                params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                                                addr_type);
                if (!params) {
index a538cb1199a3087673790272b61c57a1f2630651..45e4757c6fd25ed05a14e24e2d026204c3e79506 100644 (file)
@@ -281,6 +281,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
        br_fdb_delete_by_port(br, NULL, 0, 1);
 
        br_vlan_flush(br);
+       br_multicast_dev_del(br);
        del_timer_sync(&br->gc_timer);
 
        br_sysfs_delbr(br->dev);
index c94321955db711fdc964930c51a69d1290d39490..6a592856da1cd2d5661151689a9cc6f6e3a2202d 100644 (file)
@@ -85,6 +85,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                                        memset(&e, 0, sizeof(e));
                                        e.ifindex = port->dev->ifindex;
                                        e.state = p->state;
+                                       e.vid = p->addr.vid;
                                        if (p->addr.proto == htons(ETH_P_IP))
                                                e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -230,7 +231,7 @@ errout:
 }
 
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type)
+                  struct br_ip *group, int type, u8 state)
 {
        struct br_mdb_entry entry;
 
@@ -241,9 +242,78 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 #if IS_ENABLED(CONFIG_IPV6)
        entry.addr.u.ip6 = group->u.ip6;
 #endif
+       entry.state = state;
+       entry.vid = group->vid;
        __br_mdb_notify(dev, &entry, type);
 }
 
+static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
+                                  struct net_device *dev,
+                                  int ifindex, u32 pid,
+                                  u32 seq, int type, unsigned int flags)
+{
+       struct br_port_msg *bpm;
+       struct nlmsghdr *nlh;
+       struct nlattr *nest;
+
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       bpm = nlmsg_data(nlh);
+       memset(bpm, 0, sizeof(*bpm));
+       bpm->family = AF_BRIDGE;
+       bpm->ifindex = dev->ifindex;
+       nest = nla_nest_start(skb, MDBA_ROUTER);
+       if (!nest)
+               goto cancel;
+
+       if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
+               goto end;
+
+       nla_nest_end(skb, nest);
+       nlmsg_end(skb, nlh);
+       return 0;
+
+end:
+       nla_nest_end(skb, nest);
+cancel:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static inline size_t rtnl_rtr_nlmsg_size(void)
+{
+       return NLMSG_ALIGN(sizeof(struct br_port_msg))
+               + nla_total_size(sizeof(__u32));
+}
+
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+                  int type)
+{
+       struct net *net = dev_net(dev);
+       struct sk_buff *skb;
+       int err = -ENOBUFS;
+       int ifindex;
+
+       ifindex = port ? port->dev->ifindex : 0;
+       skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
+       if (!skb)
+               goto errout;
+
+       err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
+       if (err < 0) {
+               kfree_skb(skb);
+               goto errout;
+       }
+
+       rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
+       return;
+
+errout:
+       rtnl_set_sk_err(net, RTNLGRP_MDB, err);
+}
+
 static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 {
        if (entry->ifindex == 0)
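
Editorial aside, not part of the patch: nlmsg_populate_rtr_fill() above follows the usual netlink build-or-unwind shape. Each put can fail on a short buffer, and failure unwinds in reverse order (drop the open nest, then cancel the whole message) and reports -EMSGSIZE. A sketch of just that control flow, with the nla_*/nlmsg_* calls reduced to a bounded counter (illustrative):

#include <stdio.h>
#include <errno.h>

struct msg {
        int used, cap;
        int msg_start, nest_start;      /* rollback points */
};

static int put(struct msg *m, int len)
{
        if (m->used + len > m->cap)
                return 0;
        m->used += len;
        return 1;
}

static int fill_router_msg(struct msg *m)
{
        m->msg_start = m->used;
        if (!put(m, 16))                /* nlmsg_put(): message header */
                return -EMSGSIZE;

        m->nest_start = m->used;
        if (!put(m, 4))                 /* nla_nest_start(): open the nest */
                goto cancel;

        if (!put(m, 8))                 /* nla_put_u32(): the router port */
                goto end;

        return 0;                       /* nest ended, message finalised */

end:
        m->used = m->nest_start;        /* drop the partially built nest */
cancel:
        m->used = m->msg_start;         /* nlmsg_cancel(): drop everything */
        return -EMSGSIZE;
}

int main(void)
{
        struct msg small = { .used = 0, .cap = 24 };

        printf("rc = %d, used = %d\n", fill_router_msg(&small), small.used);
        return 0;
}
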
@@ -263,6 +333,8 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
                return false;
        if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
                return false;
+       if (entry->vid >= VLAN_VID_MASK)
+               return false;
 
        return true;
 }
@@ -374,6 +446,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
                return -EINVAL;
 
        memset(&ip, 0, sizeof(ip));
+       ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
@@ -421,6 +494,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                return -EINVAL;
 
        memset(&ip, 0, sizeof(ip));
+       ip.vid = entry->vid;
        ip.proto = entry->addr.proto;
        if (ip.proto == htons(ETH_P_IP))
                ip.u.ip4 = entry->addr.u.ip4;
index 0b39dcc65b94f0aa571dc22dc0c97afbf4e3d744..0752796fe0ba4443036a94a385ef4d1666cd3adc 100644 (file)
@@ -283,6 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
+               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+                             p->state);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -704,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -764,6 +766,7 @@ static void br_multicast_router_expired(unsigned long data)
                goto out;
 
        hlist_del_init_rcu(&port->rlist);
+       br_rtr_notify(br->dev, port, RTM_DELMDB);
 
 out:
        spin_unlock(&br->multicast_lock);
@@ -924,6 +927,15 @@ void br_multicast_add_port(struct net_bridge_port *port)
 
 void br_multicast_del_port(struct net_bridge_port *port)
 {
+       struct net_bridge *br = port->br;
+       struct net_bridge_port_group *pg;
+       struct hlist_node *n;
+
+       /* Take care of the remaining groups; only permanent ones should be left */
+       spin_lock_bh(&br->multicast_lock);
+       hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
+               br_multicast_del_pg(br, pg);
+       spin_unlock_bh(&br->multicast_lock);
        del_timer_sync(&port->multicast_router_timer);
 }
 
@@ -963,10 +975,13 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 
        spin_lock(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-               br_multicast_del_pg(br, pg);
+               if (pg->state == MDB_TEMPORARY)
+                       br_multicast_del_pg(br, pg);
 
-       if (!hlist_unhashed(&port->rlist))
+       if (!hlist_unhashed(&port->rlist)) {
                hlist_del_init_rcu(&port->rlist);
+               br_rtr_notify(br->dev, port, RTM_DELMDB);
+       }
        del_timer(&port->multicast_router_timer);
        del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1204,6 +1219,7 @@ static void br_multicast_add_router(struct net_bridge *br,
                hlist_add_behind_rcu(&port->rlist, slot);
        else
                hlist_add_head_rcu(&port->rlist, &br->router_list);
+       br_rtr_notify(br->dev, port, RTM_NEWMDB);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1437,7 +1453,8 @@ br_multicast_leave_group(struct net_bridge *br,
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, port, group, RTM_DELMDB);
+                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+                                     p->state);
 
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
@@ -1754,12 +1771,6 @@ void br_multicast_open(struct net_bridge *br)
 
 void br_multicast_stop(struct net_bridge *br)
 {
-       struct net_bridge_mdb_htable *mdb;
-       struct net_bridge_mdb_entry *mp;
-       struct hlist_node *n;
-       u32 ver;
-       int i;
-
        del_timer_sync(&br->multicast_router_timer);
        del_timer_sync(&br->ip4_other_query.timer);
        del_timer_sync(&br->ip4_own_query.timer);
@@ -1767,6 +1778,15 @@ void br_multicast_stop(struct net_bridge *br)
        del_timer_sync(&br->ip6_other_query.timer);
        del_timer_sync(&br->ip6_own_query.timer);
 #endif
+}
+
+void br_multicast_dev_del(struct net_bridge *br)
+{
+       struct net_bridge_mdb_htable *mdb;
+       struct net_bridge_mdb_entry *mp;
+       struct hlist_node *n;
+       u32 ver;
+       int i;
 
        spin_lock_bh(&br->multicast_lock);
        mdb = mlock_dereference(br->mdb, br);
@@ -1834,8 +1854,10 @@ int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
                p->multicast_router = val;
                err = 0;
 
-               if (val < 2 && !hlist_unhashed(&p->rlist))
+               if (val < 2 && !hlist_unhashed(&p->rlist)) {
                        hlist_del_init_rcu(&p->rlist);
+                       br_rtr_notify(br->dev, p, RTM_DELMDB);
+               }
 
                if (val == 1)
                        break;
index 3da5525eb8a2dc21e5f64638881a351a72a0d300..91a2e08c2bb84546fb2ab7ac2bd7974e2dcb999a 100644 (file)
@@ -164,8 +164,6 @@ static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
                            sizeof(vinfo), &vinfo))
                        goto nla_put_failure;
 
-               vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
-
                vinfo.vid = vid_end;
                vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
                if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
index 8b21146b24a055652be0c7d74fd875ed875da918..e2cb359f9dd3279be534cb24d9f0a28cbcc3bf47 100644 (file)
@@ -466,6 +466,7 @@ void br_multicast_disable_port(struct net_bridge_port *port);
 void br_multicast_init(struct net_bridge *br);
 void br_multicast_open(struct net_bridge *br);
 void br_multicast_stop(struct net_bridge *br);
+void br_multicast_dev_del(struct net_bridge *br);
 void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                          struct sk_buff *skb);
 void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
@@ -488,7 +489,9 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type);
+                  struct br_ip *group, int type, u8 state);
+void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
+                  int type);
 
 #define mlock_dereference(X, br) \
        rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -565,6 +568,10 @@ static inline void br_multicast_stop(struct net_bridge *br)
 {
 }
 
+static inline void br_multicast_dev_del(struct net_bridge *br)
+{
+}
+
 static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
                                        struct sk_buff *skb)
 {
index fec0856dd6c031a2ae369410fc5d7f9c25a1fcf6..086b01fbe1bd846e61db3e5a7efc183abf2186b6 100644 (file)
@@ -23,3 +23,4 @@ obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
 obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
 obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
 obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
+obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
index a8e4dd4302853702fef7fb1462fbdeb8a38f45e1..4870c3556a5a68be94cf28b65d527331810e7187 100644 (file)
@@ -3061,6 +3061,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        else
                skb_dst_force(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+       /* Don't forward if offload device already forwarded */
+       if (skb->offload_fwd_mark &&
+           skb->offload_fwd_mark == dev->offload_fwd_mark) {
+               consume_skb(skb);
+               rc = NET_XMIT_SUCCESS;
+               goto out;
+       }
+#endif
+
        txq = netdev_pick_tx(dev, skb, accel_priv);
        q = rcu_dereference_bh(txq->qdisc);
 
@@ -3645,7 +3655,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
-       qdisc_bstats_update_cpu(cl->q, skb);
+       qdisc_bstats_cpu_update(cl->q, skb);
 
        switch (tc_classify(skb, cl, &cl_res)) {
        case TC_ACT_OK:
@@ -3653,7 +3663,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                skb->tc_index = TC_H_MIN(cl_res.classid);
                break;
        case TC_ACT_SHOT:
-               qdisc_qstats_drop_cpu(cl->q);
+               qdisc_qstats_cpu_drop(cl->q);
        case TC_ACT_STOLEN:
        case TC_ACT_QUEUED:
                kfree_skb(skb);
@@ -4985,7 +4995,7 @@ EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
  * Gets the next netdev_adjacent->private from the dev's lower neighbour
  * list, starting from iter position. The caller must hold either hold the
  * RTNL lock or its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next_private(struct net_device *dev,
                                    struct list_head **iter)
@@ -5040,7 +5050,7 @@ EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
  * Gets the next netdev_adjacent from the dev's lower neighbour
  * list, starting from iter position. The caller must hold RTNL lock or
  * its own locking that guarantees that the neighbour lower
- * list will remain unchainged.
+ * list will remain unchanged.
  */
 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
 {
@@ -6074,6 +6084,26 @@ int dev_get_phys_port_name(struct net_device *dev,
 }
 EXPORT_SYMBOL(dev_get_phys_port_name);
 
+/**
+ *     dev_change_proto_down - update protocol port state information
+ *     @dev: device
+ *     @proto_down: new value
+ *
+ *     This info can be used by switch drivers to set the phys state of the
+ *     port.
+ */
+int dev_change_proto_down(struct net_device *dev, bool proto_down)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       if (!ops->ndo_change_proto_down)
+               return -EOPNOTSUPP;
+       if (!netif_device_present(dev))
+               return -ENODEV;
+       return ops->ndo_change_proto_down(dev, proto_down);
+}
+EXPORT_SYMBOL(dev_change_proto_down);
+
 /**
  *     dev_new_index   -       allocate an ifindex
  *     @net: the applicable net namespace
@@ -7639,7 +7669,7 @@ static int __init net_dev_init(void)
        open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
        hotcpu_notifier(dev_cpu_callback, 0);
-       dst_init();
+       dst_subsys_init();
        rc = 0;
 out:
        return rc;
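
Editorial aside, not part of the patch: dev_change_proto_down() above is a typical ndo-ops dispatch, with a capability check first (-EOPNOTSUPP if the driver lacks the hook), a liveness check second (-ENODEV), and then delegation through the ops table. A self-contained sketch of that shape (illustrative names):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct device;

struct device_ops {
        int (*change_proto_down)(struct device *dev, bool down); /* optional hook */
};

struct device {
        const struct device_ops *ops;
        bool present;
        bool proto_down;
};

static int device_change_proto_down(struct device *dev, bool down)
{
        if (!dev->ops || !dev->ops->change_proto_down)
                return -EOPNOTSUPP;     /* driver does not implement the hook */
        if (!dev->present)
                return -ENODEV;         /* device has gone away */
        return dev->ops->change_proto_down(dev, down);
}

static int demo_hook(struct device *dev, bool down)
{
        dev->proto_down = down;
        return 0;
}

int main(void)
{
        static const struct device_ops ops = { .change_proto_down = demo_hook };
        struct device dev = { .ops = &ops, .present = true };
        int rc = device_change_proto_down(&dev, true);

        printf("rc=%d proto_down=%d\n", rc, dev.proto_down);
        return 0;
}
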
index 002144bea93517d7e2e5b2c0ac00e70c028174a2..f8694d1b8702e70db45a0eee94b5361ecb4214e0 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/prefetch.h>
 
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 
 /*
  * Theory of operations:
@@ -158,19 +159,10 @@ const u32 dst_default_metrics[RTAX_MAX + 1] = {
        [RTAX_MAX] = 0xdeadbeef,
 };
 
-
-void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
-               int initial_ref, int initial_obsolete, unsigned short flags)
+void dst_init(struct dst_entry *dst, struct dst_ops *ops,
+             struct net_device *dev, int initial_ref, int initial_obsolete,
+             unsigned short flags)
 {
-       struct dst_entry *dst;
-
-       if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
-               if (ops->gc(ops))
-                       return NULL;
-       }
-       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
-       if (!dst)
-               return NULL;
        dst->child = NULL;
        dst->dev = dev;
        if (dev)
@@ -200,6 +192,25 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
        dst->next = NULL;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
+}
+EXPORT_SYMBOL(dst_init);
+
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+               int initial_ref, int initial_obsolete, unsigned short flags)
+{
+       struct dst_entry *dst;
+
+       if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
+               if (ops->gc(ops))
+                       return NULL;
+       }
+
+       dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
+       if (!dst)
+               return NULL;
+
+       dst_init(dst, ops, dev, initial_ref, initial_obsolete, flags);
+
        return dst;
 }
 EXPORT_SYMBOL(dst_alloc);
@@ -248,7 +259,11 @@ again:
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
-       kmem_cache_free(dst->ops->kmem_cachep, dst);
+
+       if (dst->flags & DST_METADATA)
+               kfree(dst);
+       else
+               kmem_cache_free(dst->ops->kmem_cachep, dst);
 
        dst = child;
        if (dst) {
@@ -329,6 +344,70 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
+static struct dst_ops md_dst_ops = {
+       .family =               AF_UNSPEC,
+};
+
+static int dst_md_discard_sk(struct sock *sk, struct sk_buff *skb)
+{
+       WARN_ONCE(1, "Attempting to call output on metadata dst\n");
+       kfree_skb(skb);
+       return 0;
+}
+
+static int dst_md_discard(struct sk_buff *skb)
+{
+       WARN_ONCE(1, "Attempting to call input on metadata dst\n");
+       kfree_skb(skb);
+       return 0;
+}
+
+static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
+{
+       struct dst_entry *dst;
+
+       dst = &md_dst->dst;
+       dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+                DST_METADATA | DST_NOCACHE | DST_NOCOUNT);
+
+       dst->input = dst_md_discard;
+       dst->output = dst_md_discard_sk;
+
+       memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
+       md_dst->opts_len = optslen;
+}
+
+struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
+{
+       struct metadata_dst *md_dst;
+
+       md_dst = kmalloc(sizeof(*md_dst) + optslen, flags);
+       if (!md_dst)
+               return NULL;
+
+       __metadata_dst_init(md_dst, optslen);
+
+       return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc);
+
+struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
+{
+       int cpu;
+       struct metadata_dst __percpu *md_dst;
+
+       md_dst = __alloc_percpu_gfp(sizeof(struct metadata_dst) + optslen,
+                                   __alignof__(struct metadata_dst), flags);
+       if (!md_dst)
+               return NULL;
+
+       for_each_possible_cpu(cpu)
+               __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
+
+       return md_dst;
+}
+EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
+
 /* Dirty hack. We did it in 2.2 (in __dst_free),
  * we have _very_ good reasons not to repeat
  * this mistake in 2.3, but we have no choice
@@ -393,7 +472,7 @@ static struct notifier_block dst_dev_notifier = {
        .priority = -10, /* must be called after other network notifiers */
 };
 
-void __init dst_init(void)
+void __init dst_subsys_init(void)
 {
        register_netdevice_notifier(&dst_dev_notifier);
 }
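
Editorial aside, not part of the patch: __metadata_dst_init() above relies on a "zero the tail" idiom. The generic dst_entry is embedded as the first member and initialised separately; everything past it, including the variable-length options area, is then cleared with one memset(dst + 1, ...). A userspace sketch of the same layout trick (illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hdr {
        int refcnt;
        unsigned short flags;
};

struct meta {
        struct hdr h;                   /* must stay the first member */
        unsigned char opts_len;
        unsigned char opts[];           /* variable-length trailer */
};

static struct meta *meta_alloc(unsigned char optslen)
{
        struct meta *m = malloc(sizeof(*m) + optslen);

        if (!m)
                return NULL;
        m->h.refcnt = 1;
        m->h.flags = 0;
        /* Zero every byte past the embedded header, options included. */
        memset(&m->h + 1, 0, sizeof(*m) + optslen - sizeof(m->h));
        m->opts_len = optslen;
        return m;
}

int main(void)
{
        struct meta *m = meta_alloc(16);

        if (m)
                printf("opts_len=%u opts[0]=%u\n", m->opts_len, m->opts[0]);
        free(m);
        return 0;
}
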
index 9a12668f7d62720c6ca18f09d13c45ea3e2ca2b2..ae8306e7c56f966196b570eb372a5321d146248a 100644 (file)
@@ -16,6 +16,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/fib_rules.h>
+#include <net/ip_tunnels.h>
 
 int fib_default_rule_add(struct fib_rules_ops *ops,
                         u32 pref, u32 table, u32 flags)
@@ -186,6 +187,9 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
        if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
                goto out;
 
+       if (rule->tun_id && (rule->tun_id != fl->flowi_tun_key.tun_id))
+               goto out;
+
        ret = ops->match(rule, fl, flags);
 out:
        return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -330,6 +334,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        if (tb[FRA_FWMASK])
                rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
 
+       if (tb[FRA_TUN_ID])
+               rule->tun_id = nla_get_be64(tb[FRA_TUN_ID]);
+
        rule->action = frh->action;
        rule->flags = frh->flags;
        rule->table = frh_get_table(frh, tb);
@@ -407,6 +414,9 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        if (unresolved)
                ops->unresolved_rules++;
 
+       if (rule->tun_id)
+               ip_tunnel_need_metadata();
+
        notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
        flush_route_cache(ops);
        rules_ops_put(ops);
@@ -473,6 +483,10 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
                        continue;
 
+               if (tb[FRA_TUN_ID] &&
+                   (rule->tun_id != nla_get_be64(tb[FRA_TUN_ID])))
+                       continue;
+
                if (!ops->compare(rule, frh, tb))
                        continue;
 
@@ -487,6 +501,9 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
                                goto errout;
                }
 
+               if (rule->tun_id)
+                       ip_tunnel_unneed_metadata();
+
                list_del_rcu(&rule->list);
 
                if (rule->action == FR_ACT_GOTO) {
@@ -535,7 +552,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
                         + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
                         + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
                         + nla_total_size(4) /* FRA_FWMARK */
-                        + nla_total_size(4); /* FRA_FWMASK */
+                        + nla_total_size(4) /* FRA_FWMASK */
+                        + nla_total_size(8); /* FRA_TUN_ID */
 
        if (ops->nlmsg_payload)
                payload += ops->nlmsg_payload(rule);
@@ -591,7 +609,9 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
            ((rule->mark_mask || rule->mark) &&
             nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
            (rule->target &&
-            nla_put_u32(skb, FRA_GOTO, rule->target)))
+            nla_put_u32(skb, FRA_GOTO, rule->target)) ||
+           (rule->tun_id &&
+            nla_put_be64(skb, FRA_TUN_ID, rule->tun_id)))
                goto nla_put_failure;
 
        if (rule->suppress_ifgroup != -1) {
index be3098fb65e45624e2e5a94b0f653d66e68293d9..1b72264ff2ee9e3935da10b8a671988bf948181e 100644 (file)
@@ -47,6 +47,8 @@
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
 #include <net/sch_generic.h>
+#include <net/cls_cgroup.h>
+#include <net/dst_metadata.h>
 
 /**
  *     sk_filter - run a packet through a socket filter
@@ -1424,6 +1426,136 @@ const struct bpf_func_proto bpf_clone_redirect_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return task_get_classid((struct sk_buff *) (unsigned long) r1);
+}
+
+static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
+       .func           = bpf_get_cgroup_classid,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+
+static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       __be16 vlan_proto = (__force __be16) r2;
+
+       if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
+                    vlan_proto != htons(ETH_P_8021AD)))
+               vlan_proto = htons(ETH_P_8021Q);
+
+       return skb_vlan_push(skb, vlan_proto, vlan_tci);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_push_proto = {
+       .func           = bpf_skb_vlan_push,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);
+
+static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+
+       return skb_vlan_pop(skb);
+}
+
+const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
+       .func           = bpf_skb_vlan_pop,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+};
+EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);
+
+bool bpf_helper_changes_skb_data(void *func)
+{
+       if (func == bpf_skb_vlan_push)
+               return true;
+       if (func == bpf_skb_vlan_pop)
+               return true;
+       return false;
+}
+
+static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2;
+       struct ip_tunnel_info *info = skb_tunnel_info(skb, AF_INET);
+
+       if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info))
+               return -EINVAL;
+
+       to->tunnel_id = be64_to_cpu(info->key.tun_id);
+       to->remote_ipv4 = be32_to_cpu(info->key.ipv4_src);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
+       .func           = bpf_skb_get_tunnel_key,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+};
+
+static struct metadata_dst __percpu *md_dst;
+
+static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5)
+{
+       struct sk_buff *skb = (struct sk_buff *) (long) r1;
+       struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2;
+       struct metadata_dst *md = this_cpu_ptr(md_dst);
+       struct ip_tunnel_info *info;
+
+       if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags))
+               return -EINVAL;
+
+       skb_dst_drop(skb);
+       dst_hold((struct dst_entry *) md);
+       skb_dst_set(skb, (struct dst_entry *) md);
+
+       info = &md->u.tun_info;
+       info->mode = IP_TUNNEL_INFO_TX;
+       info->key.tun_id = cpu_to_be64(from->tunnel_id);
+       info->key.ipv4_dst = cpu_to_be32(from->remote_ipv4);
+
+       return 0;
+}
+
+const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
+       .func           = bpf_skb_set_tunnel_key,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_STACK,
+       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void)
+{
+       if (!md_dst) {
+               /* race is not possible, since it's called from
+                * verifier that is holding verifier mutex
+                */
+               md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL);
+               if (!md_dst)
+                       return NULL;
+       }
+       return &bpf_skb_set_tunnel_key_proto;
+}
+
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
 {
@@ -1461,6 +1593,16 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
                return &bpf_l4_csum_replace_proto;
        case BPF_FUNC_clone_redirect:
                return &bpf_clone_redirect_proto;
+       case BPF_FUNC_get_cgroup_classid:
+               return &bpf_get_cgroup_classid_proto;
+       case BPF_FUNC_skb_vlan_push:
+               return &bpf_skb_vlan_push_proto;
+       case BPF_FUNC_skb_vlan_pop:
+               return &bpf_skb_vlan_pop_proto;
+       case BPF_FUNC_skb_get_tunnel_key:
+               return &bpf_skb_get_tunnel_key_proto;
+       case BPF_FUNC_skb_set_tunnel_key:
+               return bpf_get_skb_set_tunnel_key_proto();
        default:
                return sk_filter_func_proto(func_id);
        }
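
Editorial aside, not part of the patch: bpf_get_skb_set_tunnel_key_proto() above allocates its per-cpu metadata dst lazily on first use and relies on the verifier mutex to make that safe. A userspace sketch of the same lazy one-time allocation, with a pthread mutex making the serialisation explicit (illustrative names):

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_state;               /* allocated on first use, then reused */

static int *get_shared_state(void)
{
        pthread_mutex_lock(&init_lock);
        if (!shared_state)
                shared_state = calloc(1, sizeof(*shared_state));
        pthread_mutex_unlock(&init_lock);
        return shared_state;            /* NULL only if the allocation failed */
}

int main(void)
{
        printf("same pointer twice: %d\n",
               get_shared_state() == get_shared_state());
        return 0;
}
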
index 2a834c6179b9973e45274d793e7d744939e5f49e..11e6540fa386918fe207b2e72e45b60d35ddb963 100644 (file)
@@ -590,6 +590,15 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static inline void __skb_set_sw_hash(struct sk_buff *skb, u32 hash,
+                                    struct flow_keys *keys)
+{
+       if (keys->ports.ports)
+               skb->l4_hash = 1;
+       skb->sw_hash = 1;
+       skb->hash = hash;
+}
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -609,10 +618,8 @@ void __skb_get_hash(struct sk_buff *skb)
        hash = ___skb_get_hash(skb, &keys, hashrnd);
        if (!hash)
                return;
-       if (keys.ports.ports)
-               skb->l4_hash = 1;
-       skb->sw_hash = 1;
-       skb->hash = hash;
+
+       __skb_set_sw_hash(skb, hash, &keys);
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -624,6 +631,49 @@ __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
 }
 EXPORT_SYMBOL(skb_get_hash_perturb);
 
+__u32 __skb_get_hash_flowi6(struct sk_buff *skb, struct flowi6 *fl6)
+{
+       struct flow_keys keys;
+
+       memset(&keys, 0, sizeof(keys));
+
+       memcpy(&keys.addrs.v6addrs.src, &fl6->saddr,
+              sizeof(keys.addrs.v6addrs.src));
+       memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr,
+              sizeof(keys.addrs.v6addrs.dst));
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+       keys.ports.src = fl6->fl6_sport;
+       keys.ports.dst = fl6->fl6_dport;
+       keys.keyid.keyid = fl6->fl6_gre_key;
+       keys.tags.flow_label = (__force u32)fl6->flowlabel;
+       keys.basic.ip_proto = fl6->flowi6_proto;
+
+       __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+       return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi6);
+
+__u32 __skb_get_hash_flowi4(struct sk_buff *skb, struct flowi4 *fl4)
+{
+       struct flow_keys keys;
+
+       memset(&keys, 0, sizeof(keys));
+
+       keys.addrs.v4addrs.src = fl4->saddr;
+       keys.addrs.v4addrs.dst = fl4->daddr;
+       keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+       keys.ports.src = fl4->fl4_sport;
+       keys.ports.dst = fl4->fl4_dport;
+       keys.keyid.keyid = fl4->fl4_gre_key;
+       keys.basic.ip_proto = fl4->flowi4_proto;
+
+       __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), &keys);
+
+       return skb->hash;
+}
+EXPORT_SYMBOL(__skb_get_hash_flowi4);
+
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen)
 {
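
Editorial aside, not part of the patch: __skb_get_hash_flowi4()/__skb_get_hash_flowi6() above precompute an skb hash from a flow descriptor by copying the addresses, ports and protocol into a canonical key struct, hashing that struct, and caching the result on the skb. A sketch of the same recipe, with FNV-1a standing in for flow_hash_from_keys() (illustrative names):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct flow_key {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
        uint8_t proto;
};

static uint32_t fnv1a(const void *data, size_t len)
{
        const unsigned char *p = data;
        uint32_t h = 2166136261u;

        while (len--)
                h = (h ^ *p++) * 16777619u;
        return h;
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport, uint8_t proto)
{
        struct flow_key key;

        memset(&key, 0, sizeof(key));   /* zero padding so the hash is stable */
        key.saddr = saddr;
        key.daddr = daddr;
        key.sport = sport;
        key.dport = dport;
        key.proto = proto;
        return fnv1a(&key, sizeof(key));
}

int main(void)
{
        printf("hash=0x%08x\n",
               (unsigned int)flow_hash(0x0a000001, 0x0a000002, 1234, 80, 6));
        return 0;
}
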
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
new file mode 100644 (file)
index 0000000..c240c89
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * lwtunnel    Infrastructure for lightweight tunnels like MPLS
+ *
+ * Authors:    Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/lwtunnel.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/err.h>
+
+#include <net/lwtunnel.h>
+#include <net/rtnetlink.h>
+#include <net/ip6_fib.h>
+
+struct lwtunnel_state *lwtunnel_state_alloc(int encap_len)
+{
+       struct lwtunnel_state *lws;
+
+       lws = kzalloc(sizeof(*lws) + encap_len, GFP_ATOMIC);
+
+       return lws;
+}
+EXPORT_SYMBOL(lwtunnel_state_alloc);
+
+static const struct lwtunnel_encap_ops __rcu *
+               lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
+
+int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
+                          unsigned int num)
+{
+       if (num > LWTUNNEL_ENCAP_MAX)
+               return -ERANGE;
+
+       return !cmpxchg((const struct lwtunnel_encap_ops **)
+                       &lwtun_encaps[num],
+                       NULL, ops) ? 0 : -1;
+}
+EXPORT_SYMBOL(lwtunnel_encap_add_ops);
+
+int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
+                          unsigned int encap_type)
+{
+       int ret;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return -ERANGE;
+
+       ret = (cmpxchg((const struct lwtunnel_encap_ops **)
+                      &lwtun_encaps[encap_type],
+                      ops, NULL) == ops) ? 0 : -1;
+
+       synchronize_net();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_encap_del_ops);
+
+int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+                        struct nlattr *encap, struct lwtunnel_state **lws)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE ||
+           encap_type > LWTUNNEL_ENCAP_MAX)
+               return ret;
+
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[encap_type]);
+       if (likely(ops && ops->build_state))
+               ret = ops->build_state(dev, encap, lws);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       struct nlattr *nest;
+       int ret = -EINVAL;
+
+       if (!lwtstate)
+               return 0;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       ret = -EOPNOTSUPP;
+       nest = nla_nest_start(skb, RTA_ENCAP);
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->fill_encap))
+               ret = ops->fill_encap(skb, lwtstate);
+       rcu_read_unlock();
+
+       if (ret)
+               goto nla_put_failure;
+       nla_nest_end(skb, nest);
+       ret = nla_put_u16(skb, RTA_ENCAP_TYPE, lwtstate->type);
+       if (ret)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
+
+       return (ret == -EOPNOTSUPP ? 0 : ret);
+}
+EXPORT_SYMBOL(lwtunnel_fill_encap);
+
+int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = 0;
+
+       if (!lwtstate)
+               return 0;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->get_encap_size))
+               ret = nla_total_size(ops->get_encap_size(lwtstate));
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_get_encap_size);
+
+int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = 0;
+
+       if (!a && !b)
+               return 0;
+
+       if (!a || !b)
+               return 1;
+
+       if (a->type != b->type)
+               return 1;
+
+       if (a->type == LWTUNNEL_ENCAP_NONE ||
+           a->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[a->type]);
+       if (likely(ops && ops->cmp_encap))
+               ret = ops->cmp_encap(a, b);
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL(lwtunnel_cmp_encap);
+
+int __lwtunnel_output(struct sock *sk, struct sk_buff *skb,
+                     struct lwtunnel_state *lwtstate)
+{
+       const struct lwtunnel_encap_ops *ops;
+       int ret = -EINVAL;
+
+       if (!lwtstate)
+               goto drop;
+
+       if (lwtstate->type == LWTUNNEL_ENCAP_NONE ||
+           lwtstate->type > LWTUNNEL_ENCAP_MAX)
+               return 0;
+
+       ret = -EOPNOTSUPP;
+       rcu_read_lock();
+       ops = rcu_dereference(lwtun_encaps[lwtstate->type]);
+       if (likely(ops && ops->output))
+               ret = ops->output(sk, skb);
+       rcu_read_unlock();
+
+       if (ret == -EOPNOTSUPP)
+               goto drop;
+
+       return ret;
+
+drop:
+       kfree_skb(skb);
+
+       return ret;
+}
+
+int lwtunnel_output6(struct sock *sk, struct sk_buff *skb)
+{
+       struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+       struct lwtunnel_state *lwtstate = NULL;
+
+       if (rt)
+               lwtstate = rt->rt6i_lwtstate;
+
+       return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output6);
+
+int lwtunnel_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct rtable *rt = (struct rtable *)skb_dst(skb);
+       struct lwtunnel_state *lwtstate = NULL;
+
+       if (rt)
+               lwtstate = rt->rt_lwtstate;
+
+       return __lwtunnel_output(sk, skb, lwtstate);
+}
+EXPORT_SYMBOL(lwtunnel_output);
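
Editorial aside, not part of the patch: lwtunnel_encap_add_ops() above claims its slot with a single cmpxchg(), so registration succeeds only if the slot is still NULL and two callers can never both own one encap type, with no lock taken. A userspace sketch using C11 atomics in place of the kernel's cmpxchg() (illustrative names):

#include <stdio.h>
#include <stdatomic.h>

#define MAX_OPS 8

struct encap_ops {
        const char *name;
};

static _Atomic(const struct encap_ops *) ops_table[MAX_OPS];

static int encap_add_ops(const struct encap_ops *ops, unsigned int num)
{
        const struct encap_ops *expected = NULL;

        if (num >= MAX_OPS)
                return -1;              /* the kernel returns -ERANGE here */
        /* Succeeds only while the slot still holds NULL. */
        return atomic_compare_exchange_strong(&ops_table[num], &expected, ops)
               ? 0 : -1;
}

int main(void)
{
        static const struct encap_ops a = { "mpls" }, b = { "other" };

        printf("first:  %d\n", encap_add_ops(&a, 1));   /* 0: slot claimed */
        printf("second: %d\n", encap_add_ops(&b, 1));   /* -1: already taken */
        return 0;
}
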
index 18b34d771ed4dc7415a17cfdab83e56ee5683d02..194c1d03b2b3b1e78254fb0108682e4dfa3ab776 100644 (file)
@@ -404,6 +404,19 @@ static ssize_t group_store(struct device *dev, struct device_attribute *attr,
 NETDEVICE_SHOW(group, fmt_dec);
 static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
 
+static int change_proto_down(struct net_device *dev, unsigned long proto_down)
+{
+       return dev_change_proto_down(dev, (bool) proto_down);
+}
+
+static ssize_t proto_down_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t len)
+{
+       return netdev_store(dev, attr, buf, len, change_proto_down);
+}
+NETDEVICE_SHOW_RW(proto_down, fmt_dec);
+
 static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
 {
@@ -501,6 +514,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_port_name.attr,
        &dev_attr_phys_switch_id.attr,
+       &dev_attr_proto_down.attr,
        NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
index 1ebdf1c0d1188c309d854bc9145c9b2f5b7b58a4..0e0fb30cbc04084c96d6ae8ceec94a2e493a3bbf 100644 (file)
@@ -273,7 +273,6 @@ struct pktgen_dev {
 
        /* runtime counters relating to clone_skb */
 
-       __u64 allocated_skbs;
        __u32 clone_count;
        int last_ok;            /* Was last skb sent?
                                 * Or a failed transmit of some sort?
@@ -2279,7 +2278,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-       pkt_dev->pkt_overhead = 0;
+       pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
        pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
        pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
        pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2788,6 +2787,7 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
        } else {
                 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
        }
+       skb_reserve(skb, LL_RESERVED_SPACE(dev));
 
        return skb;
 }
@@ -3397,7 +3397,6 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                        return;
                }
                pkt_dev->last_pkt_size = pkt_dev->skb->len;
-               pkt_dev->allocated_skbs++;
                pkt_dev->clone_count = 0;       /* reset counter */
        }
 
index dc004b1e1f8515250bb7c7f284b047d2f961f083..788ceed394636e4a3c3c38e4fbfe383dca8e48df 100644 (file)
@@ -896,7 +896,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
               + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
               + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
               + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
-              + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
+              + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
+              + nla_total_size(1); /* IFLA_PROTO_DOWN */
+
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -1082,7 +1084,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
            (dev->ifalias &&
             nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)) ||
            nla_put_u32(skb, IFLA_CARRIER_CHANGES,
-                       atomic_read(&dev->carrier_changes)))
+                       atomic_read(&dev->carrier_changes)) ||
+           nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
                goto nla_put_failure;
 
        if (1) {
@@ -1319,6 +1322,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
        [IFLA_CARRIER_CHANGES]  = { .type = NLA_U32 },  /* ignored */
        [IFLA_PHYS_SWITCH_ID]   = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
        [IFLA_LINK_NETNSID]     = { .type = NLA_S32 },
+       [IFLA_PROTO_DOWN]       = { .type = NLA_U8 },
 };
 
 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1861,6 +1865,14 @@ static int do_setlink(const struct sk_buff *skb,
        }
        err = 0;
 
+       if (tb[IFLA_PROTO_DOWN]) {
+               err = dev_change_proto_down(dev,
+                                           nla_get_u8(tb[IFLA_PROTO_DOWN]));
+               if (err)
+                       goto errout;
+               status |= DO_SETLINK_NOTIFY;
+       }
+
 errout:
        if (status & DO_SETLINK_MODIFIED) {
                if (status & DO_SETLINK_NOTIFY)
@@ -1951,16 +1963,30 @@ static int rtnl_group_dellink(const struct net *net, int group)
        return 0;
 }
 
+int rtnl_delete_link(struct net_device *dev)
+{
+       const struct rtnl_link_ops *ops;
+       LIST_HEAD(list_kill);
+
+       ops = dev->rtnl_link_ops;
+       if (!ops || !ops->dellink)
+               return -EOPNOTSUPP;
+
+       ops->dellink(dev, &list_kill);
+       unregister_netdevice_many(&list_kill);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rtnl_delete_link);
+
 static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
-       const struct rtnl_link_ops *ops;
        struct net_device *dev;
        struct ifinfomsg *ifm;
        char ifname[IFNAMSIZ];
        struct nlattr *tb[IFLA_MAX+1];
        int err;
-       LIST_HEAD(list_kill);
 
        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
        if (err < 0)
@@ -1982,13 +2008,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (!dev)
                return -ENODEV;
 
-       ops = dev->rtnl_link_ops;
-       if (!ops || !ops->dellink)
-               return -EOPNOTSUPP;
-
-       ops->dellink(dev, &list_kill);
-       unregister_netdevice_many(&list_kill);
-       return 0;
+       return rtnl_delete_link(dev);
 }
 
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
index 43d3dd62fcc8eccd95a4618f68b0553cf7309c01..42689d5c468cb4f53baa058c74cdee58099137c7 100644 (file)
@@ -60,11 +60,15 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
        struct phy_device *phydev;
        unsigned int type;
 
+       if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->drv)
+               return false;
+
        if (skb_headroom(skb) < ETH_HLEN)
                return false;
+
        __skb_push(skb, ETH_HLEN);
 
-       type = classify(skb);
+       type = ptp_classify_raw(skb);
 
        __skb_pull(skb, ETH_HLEN);
 
index d5f1f9b862ea5f4794ba2fab277e19aed8e51d2a..311796c809afc2f3807c653ebc64b81ef9bb2152 100644 (file)
 
 #include <linux/phy.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 
 struct dsa_device_ops {
-       netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+       struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
        int (*rcv)(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev);
 };
@@ -26,7 +27,7 @@ struct dsa_slave_priv {
         * switch port.
         */
        struct net_device       *dev;
-       netdev_tx_t             (*xmit)(struct sk_buff *skb,
+       struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);
 
        /*
@@ -47,6 +48,9 @@ struct dsa_slave_priv {
        int                     old_duplex;
 
        struct net_device       *bridge_dev;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       struct netpoll          *netpoll;
+#endif
 };
 
 /* dsa.c */
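
Under the new signature above, a tagger no longer queues the frame itself: it rewrites the skb (possibly reallocating it) and returns it to the caller, or returns NULL when it consumed the skb on error. A sketch of the contract for a hypothetical 4-byte "foo" tag inserted after the MAC addresses:

static struct sk_buff *foo_tag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_cow_head(skb, 4) < 0) {
		kfree_skb(skb);
		return NULL;	/* consumed: caller treats this as done */
	}

	skb_push(skb, 4);
	memmove(skb->data, skb->data + 4, 2 * ETH_ALEN);
	/* ... fill the 4 tag bytes between the addresses and the EtherType ... */

	return skb;	/* caller queues this on the master netdev */
}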
index 0917123790eaf09b001c97a733039185fdb0a800..0010c690cc6715838c76da2d42c93ea9dcc113fc 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
 #include "dsa_priv.h"
 
 /* slave mii_bus handling ***************************************************/
@@ -418,24 +419,53 @@ static int dsa_slave_port_attr_get(struct net_device *dev,
        return 0;
 }
 
-static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p,
+                                              struct sk_buff *skb)
 {
-       struct dsa_slave_priv *p = netdev_priv(dev);
-
-       return p->xmit(skb, dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       if (p->netpoll)
+               netpoll_send_skb(p->netpoll, skb);
+#else
+       BUG();
+#endif
+       return NETDEV_TX_OK;
 }
 
-static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
-                                       struct net_device *dev)
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
+       struct sk_buff *nskb;
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       /* Transmit function may have to reallocate the original SKB */
+       nskb = p->xmit(skb, dev);
+       if (!nskb)
+               return NETDEV_TX_OK;
+
+       /* SKB for netpoll still needs to be mangled with the protocol-specific
+        * tag to be successfully transmitted
+        */
+       if (unlikely(netpoll_tx_running(dev)))
+               return dsa_netpoll_send_skb(p, nskb);
+
+       /* Queue the SKB for transmission on the parent interface, but
+        * do not modify its EtherType
+        */
+       nskb->dev = p->parent->dst->master_netdev;
+       dev_queue_xmit(nskb);
 
        return NETDEV_TX_OK;
 }
 
+static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
+                                           struct net_device *dev)
+{
+       /* Just return the original SKB */
+       return skb;
+}
+
 
 /* ethtool operations *******************************************************/
 static int
@@ -665,6 +695,49 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
        return ret;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int dsa_slave_netpoll_setup(struct net_device *dev,
+                                  struct netpoll_info *ni)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->parent;
+       struct net_device *master = ds->dst->master_netdev;
+       struct netpoll *netpoll;
+       int err = 0;
+
+       netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
+       if (!netpoll)
+               return -ENOMEM;
+
+       err = __netpoll_setup(netpoll, master);
+       if (err) {
+               kfree(netpoll);
+               goto out;
+       }
+
+       p->netpoll = netpoll;
+out:
+       return err;
+}
+
+static void dsa_slave_netpoll_cleanup(struct net_device *dev)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct netpoll *netpoll = p->netpoll;
+
+       if (!netpoll)
+               return;
+
+       p->netpoll = NULL;
+
+       __netpoll_free_async(netpoll);
+}
+
+static void dsa_slave_poll_controller(struct net_device *dev)
+{
+}
+#endif
+
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_settings           = dsa_slave_get_settings,
        .set_settings           = dsa_slave_set_settings,
@@ -697,6 +770,11 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_fdb_dump           = dsa_slave_fdb_dump,
        .ndo_do_ioctl           = dsa_slave_ioctl,
        .ndo_get_iflink         = dsa_slave_get_iflink,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_netpoll_setup      = dsa_slave_netpoll_setup,
+       .ndo_netpoll_cleanup    = dsa_slave_netpoll_cleanup,
+       .ndo_poll_controller    = dsa_slave_poll_controller,
+#endif
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
index 83d3572cdb205934e3099c258244f347d7351517..e2aadb73111d544c1ce16db13bfd99c7506642fc 100644 (file)
 #define BRCM_EG_TC_MASK                0x7
 #define BRCM_EG_PID_MASK       0x1f
 
-static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *brcm_tag;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
                goto out_free;
 
@@ -87,17 +84,11 @@ static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
                brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
        brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
 
-       /* Queue the SKB for transmission on the parent interface, but
-        * do not modify its EtherType
-        */
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
index 2dab27063273d2d48d12cc13b9d73fefe92c9362..aa780e4ac0bd9653618c2b4c582d882cfb52f94a 100644 (file)
 
 #define DSA_HLEN       4
 
-static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *dsa_header;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * Convert the outermost 802.1q tag to a DSA tag for tagged
         * packets, or insert a DSA tag between the addresses and
@@ -63,14 +60,11 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
                dsa_header[3] = 0x00;
        }
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
index 9aeda596f7ec4ec2c30df9167e87c7719535de91..2288c8098c42800c6477068c0334d7e1874ba608 100644 (file)
 #define DSA_HLEN       4
 #define EDSA_HLEN      8
 
-static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        u8 *edsa_header;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * Convert the outermost 802.1q tag to a DSA tag and prepend
         * a DSA ethertype field if the packet is tagged, or insert
@@ -76,14 +73,11 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
                edsa_header[7] = 0x00;
        }
 
-       skb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(skb);
-
-       return NETDEV_TX_OK;
+       return skb;
 
 out_free:
        kfree_skb(skb);
-       return NETDEV_TX_OK;
+       return NULL;
 }
 
 static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
index e268f9db8893deab7c2febd26ad0aae1849b157d..d25efc93d8f120739c83c3998e77d5e9dd3cfc45 100644 (file)
 #include <linux/slab.h>
 #include "dsa_priv.h"
 
-static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
        struct sk_buff *nskb;
        int padlen;
        u8 *trailer;
 
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
        /*
         * We have to make sure that the trailer ends up as the very
         * last 4 bytes of the packet.  This means that we have to pad
@@ -36,7 +33,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        nskb = alloc_skb(NET_IP_ALIGN + skb->len + padlen + 4, GFP_ATOMIC);
        if (nskb == NULL) {
                kfree_skb(skb);
-               return NETDEV_TX_OK;
+               return NULL;
        }
        skb_reserve(nskb, NET_IP_ALIGN);
 
@@ -57,10 +54,7 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        trailer[2] = 0x10;
        trailer[3] = 0x00;
 
-       nskb->dev = p->parent->dst->master_netdev;
-       dev_queue_xmit(nskb);
-
-       return NETDEV_TX_OK;
+       return nskb;
 }
 
 static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
index b2155a123f6c88980c180eeb7b4ffdcf68bea4fb..8d5960a37195136380032644b65a8b741a03ab00 100644 (file)
@@ -23,6 +23,26 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
        rdev->ops->del_virtual_intf_deprecated(&rdev->wpan_phy, dev);
 }
 
+static inline int
+rdev_suspend(struct cfg802154_registered_device *rdev)
+{
+       int ret;
+       trace_802154_rdev_suspend(&rdev->wpan_phy);
+       ret = rdev->ops->suspend(&rdev->wpan_phy);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
+static inline int
+rdev_resume(struct cfg802154_registered_device *rdev)
+{
+       int ret;
+       trace_802154_rdev_resume(&rdev->wpan_phy);
+       ret = rdev->ops->resume(&rdev->wpan_phy);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
+}
+
 static inline int
 rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
                      unsigned char name_assign_type,
index 133b4280660cfc2f9b651a56a95502991e21840b..bd88525b041e79c62a0458c499968315c5a56e4e 100644 (file)
  */
 
 #include <linux/device.h>
+#include <linux/rtnetlink.h>
 
 #include <net/cfg802154.h>
 
 #include "core.h"
 #include "sysfs.h"
+#include "rdev-ops.h"
 
 static inline struct cfg802154_registered_device *
 dev_to_rdev(struct device *dev)
@@ -62,10 +64,46 @@ static struct attribute *pmib_attrs[] = {
 };
 ATTRIBUTE_GROUPS(pmib);
 
+#ifdef CONFIG_PM_SLEEP
+static int wpan_phy_suspend(struct device *dev)
+{
+       struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+       int ret = 0;
+
+       if (rdev->ops->suspend) {
+               rtnl_lock();
+               ret = rdev_suspend(rdev);
+               rtnl_unlock();
+       }
+
+       return ret;
+}
+
+static int wpan_phy_resume(struct device *dev)
+{
+       struct cfg802154_registered_device *rdev = dev_to_rdev(dev);
+       int ret = 0;
+
+       if (rdev->ops->resume) {
+               rtnl_lock();
+               ret = rdev_resume(rdev);
+               rtnl_unlock();
+       }
+
+       return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(wpan_phy_pm_ops, wpan_phy_suspend, wpan_phy_resume);
+#define WPAN_PHY_PM_OPS (&wpan_phy_pm_ops)
+#else
+#define WPAN_PHY_PM_OPS NULL
+#endif
+
 struct class wpan_phy_class = {
        .name = "ieee802154",
        .dev_release = wpan_phy_release,
        .dev_groups = pmib_groups,
+       .pm = WPAN_PHY_PM_OPS,
 };
 
 int wpan_phy_sysfs_init(void)
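
The PM callbacks above only run when rdev->ops provides the new hooks; a sketch of how a driver-side ops table would wire them up, with the foo_* handler names purely illustrative:

static const struct cfg802154_ops foo_cfg802154_ops = {
	/* ... existing operations ... */
	.suspend = foo_phy_suspend,	/* invoked under rtnl via wpan_phy_suspend() */
	.resume  = foo_phy_resume,	/* invoked under rtnl via wpan_phy_resume() */
};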
index 9b5f0eb366969c0c968935389b97ad0893b41cf1..4399b7fbaa31481c402079680e3509ed05fb9479 100644 (file)
  *                     rdev->ops traces                     *
  *************************************************************/
 
+DECLARE_EVENT_CLASS(wpan_phy_only_evt,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT, WPAN_PHY_PR_ARG)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_suspend,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy)
+);
+
+DEFINE_EVENT(wpan_phy_only_evt, 802154_rdev_resume,
+       TP_PROTO(struct wpan_phy *wpan_phy),
+       TP_ARGS(wpan_phy)
+);
+
 TRACE_EVENT(802154_rdev_add_virtual_intf,
        TP_PROTO(struct wpan_phy *wpan_phy, char *name,
                 enum nl802154_iftype type, __le64 extended_addr),
index 9532ee87151f5d184205eafc70729fc543a9eb82..cc4e498a0ccf390115c2f6f0306248650f4a2c35 100644 (file)
 #include <net/raw.h>
 #include <net/icmp.h>
 #include <net/inet_common.h>
+#include <net/ip_tunnels.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/secure_seq.h>
@@ -1780,6 +1781,8 @@ static int __init inet_init(void)
 
        dev_add_pack(&ip_packet_type);
 
+       ip_tunnel_core_init();
+
        rc = 0;
 out:
        return rc;
index 6c8b1fbafce8e39cb7c4c058fa653dd9d2f52f9e..34a308573f4b0e7bcae0ad9ffa5e45b19c72c1d1 100644 (file)
@@ -291,6 +291,40 @@ static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb)
        kfree_skb(skb);
 }
 
+/* Create and send an arp packet. */
+static void arp_send_dst(int type, int ptype, __be32 dest_ip,
+                        struct net_device *dev, __be32 src_ip,
+                        const unsigned char *dest_hw,
+                        const unsigned char *src_hw,
+                        const unsigned char *target_hw, struct sk_buff *oskb)
+{
+       struct sk_buff *skb;
+
+       /* No arp on this interface. */
+       if (dev->flags & IFF_NOARP)
+               return;
+
+       skb = arp_create(type, ptype, dest_ip, dev, src_ip,
+                        dest_hw, src_hw, target_hw);
+       if (!skb)
+               return;
+
+       if (oskb)
+               skb_dst_copy(skb, oskb);
+
+       arp_xmit(skb);
+}
+
+void arp_send(int type, int ptype, __be32 dest_ip,
+             struct net_device *dev, __be32 src_ip,
+             const unsigned char *dest_hw, const unsigned char *src_hw,
+             const unsigned char *target_hw)
+{
+       arp_send_dst(type, ptype, dest_ip, dev, src_ip, dest_hw, src_hw,
+                    target_hw, NULL);
+}
+EXPORT_SYMBOL(arp_send);
+
 static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
 {
        __be32 saddr = 0;
@@ -346,8 +380,9 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
                }
        }
 
-       arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
-                dst_hw, dev->dev_addr, NULL);
+       arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
+                    dst_hw, dev->dev_addr, NULL,
+                    dev->priv_flags & IFF_XMIT_DST_RELEASE ? NULL : skb);
 }
 
 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
@@ -596,32 +631,6 @@ void arp_xmit(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(arp_xmit);
 
-/*
- *     Create and send an arp packet.
- */
-void arp_send(int type, int ptype, __be32 dest_ip,
-             struct net_device *dev, __be32 src_ip,
-             const unsigned char *dest_hw, const unsigned char *src_hw,
-             const unsigned char *target_hw)
-{
-       struct sk_buff *skb;
-
-       /*
-        *      No arp on this interface.
-        */
-
-       if (dev->flags&IFF_NOARP)
-               return;
-
-       skb = arp_create(type, ptype, dest_ip, dev, src_ip,
-                        dest_hw, src_hw, target_hw);
-       if (!skb)
-               return;
-
-       arp_xmit(skb);
-}
-EXPORT_SYMBOL(arp_send);
-
 /*
  *     Process an arp request.
  */
index 574fad9cca052cb2970e283a4dc5568c4b3b8b23..f915abff1350a86af8d5bb89725b751c061b0fb5 100644 (file)
@@ -74,7 +74,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
-       inet_set_txhash(sk);
+       sk_set_txhash(sk);
        inet->inet_id = jiffies;
 
        sk_dst_set(sk, &rt->dst);
index 6bbc54940eb4ee4802a5a94835e44b0d22b170dd..6b98de0d79498d575a44d6c20bd3abb5a38ea75c 100644 (file)
@@ -280,6 +280,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
                fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
                fl4.flowi4_scope = scope;
                fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+               fl4.flowi4_tun_key.tun_id = 0;
                if (!fib_lookup(net, &fl4, &res, 0))
                        return FIB_RES_PREFSRC(net, res);
        } else {
@@ -313,6 +314,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
        fl4.saddr = dst;
        fl4.flowi4_tos = tos;
        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
+       fl4.flowi4_tun_key.tun_id = 0;
 
        no_addr = idev->ifa_list == NULL;
 
@@ -591,6 +593,8 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_FLOW]              = { .type = NLA_U32 },
+       [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
+       [RTA_ENCAP]             = { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
@@ -656,6 +660,12 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
                case RTA_TABLE:
                        cfg->fc_table = nla_get_u32(attr);
                        break;
+               case RTA_ENCAP:
+                       cfg->fc_encap = attr;
+                       break;
+               case RTA_ENCAP_TYPE:
+                       cfg->fc_encap_type = nla_get_u16(attr);
+                       break;
                }
        }
 
index 3a06586b170c0947ef62ecc08a5dcf1a1c768011..558e196bae0f5a10a6bc81102c657db00b726ad1 100644 (file)
@@ -42,6 +42,7 @@
 #include <net/ip_fib.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include "fib_lookup.h"
 
@@ -208,6 +209,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
        change_nexthops(fi) {
                if (nexthop_nh->nh_dev)
                        dev_put(nexthop_nh->nh_dev);
+               lwtstate_put(nexthop_nh->nh_lwtstate);
                free_nh_exceptions(nexthop_nh);
                rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
                rt_fibinfo_free(&nexthop_nh->nh_rth_input);
@@ -266,6 +268,7 @@ static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
 #ifdef CONFIG_IP_ROUTE_CLASSID
                    nh->nh_tclassid != onh->nh_tclassid ||
 #endif
+                   lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
                    ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
                        return -1;
                onh++;
@@ -366,6 +369,7 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
        payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
 
        if (fi->fib_nhs) {
+               size_t nh_encapsize = 0;
                /* Also handles the special case fib_nhs == 1 */
 
                /* each nexthop is packed in an attribute */
@@ -374,8 +378,21 @@ static inline size_t fib_nlmsg_size(struct fib_info *fi)
                /* may contain flow and gateway attribute */
                nhsize += 2 * nla_total_size(4);
 
+               /* grab encap info */
+               for_nexthops(fi) {
+                       if (nh->nh_lwtstate) {
+                               /* RTA_ENCAP_TYPE */
+                               nh_encapsize += lwtunnel_get_encap_size(
+                                               nh->nh_lwtstate);
+                               /* RTA_ENCAP */
+                               nh_encapsize += nla_total_size(2);
+                       }
+               } endfor_nexthops(fi);
+
                /* all nexthops are packed in a nested attribute */
-               payload += nla_total_size(fi->fib_nhs * nhsize);
+               payload += nla_total_size((fi->fib_nhs * nhsize) +
+                                         nh_encapsize);
+
        }
 
        return payload;
@@ -421,13 +438,15 @@ static int fib_detect_death(struct fib_info *fi, int order,
        if (n) {
                state = n->nud_state;
                neigh_release(n);
+       } else {
+               return 0;
        }
        if (state == NUD_REACHABLE)
                return 0;
        if ((state & NUD_VALID) && order != dflt)
                return 0;
        if ((state & NUD_VALID) ||
-           (*last_idx < 0 && order > dflt)) {
+           (*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
                *last_resort = fi;
                *last_idx = order;
        }
@@ -452,6 +471,9 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg)
 {
+       struct net *net = cfg->fc_nlinfo.nl_net;
+       int ret;
+
        change_nexthops(fi) {
                int attrlen;
 
@@ -475,18 +497,66 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                        if (nexthop_nh->nh_tclassid)
                                fi->fib_net->ipv4.fib_num_tclassid_users++;
 #endif
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP);
+                       if (nla) {
+                               struct lwtunnel_state *lwtstate;
+                               struct net_device *dev = NULL;
+                               struct nlattr *nla_entype;
+
+                               nla_entype = nla_find(attrs, attrlen,
+                                                     RTA_ENCAP_TYPE);
+                               if (!nla_entype)
+                                       goto err_inval;
+                               if (cfg->fc_oif)
+                                       dev = __dev_get_by_index(net, cfg->fc_oif);
+                               ret = lwtunnel_build_state(dev,
+                                                          nla_get_u16(nla_entype),
+                                                          nla, &lwtstate);
+                               if (ret)
+                                       goto errout;
+                               nexthop_nh->nh_lwtstate =
+                                       lwtstate_get(lwtstate);
+                       }
                }
 
                rtnh = rtnh_next(rtnh, &remaining);
        } endfor_nexthops(fi);
 
        return 0;
+
+err_inval:
+       ret = -EINVAL;
+
+errout:
+       return ret;
 }
 
 #endif
 
+int fib_encap_match(struct net *net, u16 encap_type,
+                   struct nlattr *encap,
+                   int oif, const struct fib_nh *nh)
+{
+       struct lwtunnel_state *lwtstate;
+       struct net_device *dev = NULL;
+       int ret;
+
+       if (encap_type == LWTUNNEL_ENCAP_NONE)
+               return 0;
+
+       if (oif)
+               dev = __dev_get_by_index(net, oif);
+       ret = lwtunnel_build_state(dev, encap_type,
+                                  encap, &lwtstate);
+       if (!ret)
+               return lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
+
+       return 0;
+}
+
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 {
+       struct net *net = cfg->fc_nlinfo.nl_net;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        struct rtnexthop *rtnh;
        int remaining;
@@ -496,6 +566,12 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
                return 1;
 
        if (cfg->fc_oif || cfg->fc_gw) {
+               if (cfg->fc_encap) {
+                       if (fib_encap_match(net, cfg->fc_encap_type,
+                                           cfg->fc_encap, cfg->fc_oif,
+                                           fi->fib_nh))
+                           return 1;
+               }
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
                    (!cfg->fc_gw  || cfg->fc_gw == fi->fib_nh->nh_gw))
                        return 0;
@@ -882,6 +958,21 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        } else {
                struct fib_nh *nh = fi->fib_nh;
 
+               if (cfg->fc_encap) {
+                       struct lwtunnel_state *lwtstate;
+                       struct net_device *dev = NULL;
+
+                       if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
+                               goto err_inval;
+                       if (cfg->fc_oif)
+                               dev = __dev_get_by_index(net, cfg->fc_oif);
+                       err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+                                                  cfg->fc_encap, &lwtstate);
+                       if (err)
+                               goto failure;
+
+                       nh->nh_lwtstate = lwtstate_get(lwtstate);
+               }
                nh->nh_oif = cfg->fc_oif;
                nh->nh_gw = cfg->fc_gw;
                nh->nh_flags = cfg->fc_flags;
@@ -1055,6 +1146,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
                        goto nla_put_failure;
 #endif
+               if (fi->fib_nh->nh_lwtstate)
+                       lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
        }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
@@ -1090,6 +1183,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                            nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                                goto nla_put_failure;
 #endif
+                       if (nh->nh_lwtstate)
+                               lwtunnel_fill_encap(skb, nh->nh_lwtstate);
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);
index f5203fba623638d94b03435db86ac4ed696adba8..c0556f1e4bf09233970c8d5c3fd68afa9a78489f 100644 (file)
@@ -496,6 +496,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
                }
                /* Ugh! */
                orefdst = skb_in->_skb_refdst; /* save old refdst */
+               skb_dst_set(skb_in, NULL);
                err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
                                     RT_TOS(tos), rt2->dst.dev);
 
index 0cb9165421d450ae8f6aff81b88ef4bf2839ff51..89120196a94934e3bb1f201eef9ad7f936ad828b 100644 (file)
@@ -343,7 +343,6 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;
-       int twrefcnt = 0;
 
        spin_lock(lock);
 
@@ -371,21 +370,17 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
-               twrefcnt = inet_twsk_unhash(tw);
+               sk_nulls_del_node_init_rcu((struct sock *)tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
-       if (twrefcnt)
-               inet_twsk_put(tw);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
-               inet_twsk_deschedule(tw);
-
-               inet_twsk_put(tw);
+               inet_twsk_deschedule_put(tw);
        }
        return 0;
 
@@ -403,13 +398,12 @@ static u32 inet_sk_port_offset(const struct sock *sk)
                                          inet->inet_dport);
 }
 
-int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash_nolisten(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct hlist_nulls_head *list;
        struct inet_ehash_bucket *head;
        spinlock_t *lock;
-       int twrefcnt = 0;
 
        WARN_ON(!sk_unhashed(sk));
 
@@ -420,23 +414,22 @@ int __inet_hash_nolisten(struct sock *sk, struct inet_timewait_sock *tw)
 
        spin_lock(lock);
        __sk_nulls_add_node_rcu(sk, list);
-       if (tw) {
-               WARN_ON(sk->sk_hash != tw->tw_hash);
-               twrefcnt = inet_twsk_unhash(tw);
+       if (osk) {
+               WARN_ON(sk->sk_hash != osk->sk_hash);
+               sk_nulls_del_node_init_rcu(osk);
        }
        spin_unlock(lock);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
-       return twrefcnt;
 }
 EXPORT_SYMBOL_GPL(__inet_hash_nolisten);
 
-int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
+void __inet_hash(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;
 
        if (sk->sk_state != TCP_LISTEN)
-               return __inet_hash_nolisten(sk, tw);
+               return __inet_hash_nolisten(sk, osk);
 
        WARN_ON(!sk_unhashed(sk));
        ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
@@ -445,7 +438,6 @@ int __inet_hash(struct sock *sk, struct inet_timewait_sock *tw)
        __sk_nulls_add_node_rcu(sk, &ilb->head);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
        spin_unlock(&ilb->lock);
-       return 0;
 }
 EXPORT_SYMBOL(__inet_hash);
 
@@ -492,7 +484,6 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
        struct inet_bind_bucket *tb;
        int ret;
        struct net *net = sock_net(sk);
-       int twrefcnt = 1;
 
        if (!snum) {
                int i, remaining, low, high, port;
@@ -560,19 +551,14 @@ ok:
                inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->inet_sport = htons(port);
-                       twrefcnt += __inet_hash_nolisten(sk, tw);
+                       __inet_hash_nolisten(sk, (struct sock *)tw);
                }
                if (tw)
-                       twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
+                       inet_twsk_bind_unhash(tw, hinfo);
                spin_unlock(&head->lock);
 
-               if (tw) {
-                       inet_twsk_deschedule(tw);
-                       while (twrefcnt) {
-                               twrefcnt--;
-                               inet_twsk_put(tw);
-                       }
-               }
+               if (tw)
+                       inet_twsk_deschedule_put(tw);
 
                ret = 0;
                goto out;
index 2ffbd16b79e00279235244c3412046062a86fec5..ae22cc24fbe89b32be1f2142450c198e78026851 100644 (file)
 #include <net/ip.h>
 
 
-/**
- *     inet_twsk_unhash - unhash a timewait socket from established hash
- *     @tw: timewait socket
- *
- *     unhash a timewait socket from established hash, if hashed.
- *     ehash lock must be held by caller.
- *     Returns 1 if caller should call inet_twsk_put() after lock release.
- */
-int inet_twsk_unhash(struct inet_timewait_sock *tw)
-{
-       if (hlist_nulls_unhashed(&tw->tw_node))
-               return 0;
-
-       hlist_nulls_del_rcu(&tw->tw_node);
-       sk_nulls_node_init(&tw->tw_node);
-       /*
-        * We cannot call inet_twsk_put() ourself under lock,
-        * caller must call it for us.
-        */
-       return 1;
-}
-
 /**
  *     inet_twsk_bind_unhash - unhash a timewait socket from bind hash
  *     @tw: timewait socket
@@ -48,35 +26,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
  *     bind hash lock must be held by caller.
  *     Returns 1 if caller should call inet_twsk_put() after lock release.
  */
-int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
 {
        struct inet_bind_bucket *tb = tw->tw_tb;
 
        if (!tb)
-               return 0;
+               return;
 
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-       /*
-        * We cannot call inet_twsk_put() ourself under lock,
-        * caller must call it for us.
-        */
-       return 1;
+       __sock_put((struct sock *)tw);
 }
 
 /* Must be called with locally disabled BHs. */
 static void inet_twsk_kill(struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
-       struct inet_bind_hashbucket *bhead;
-       int refcnt;
-       /* Unlink from established hashes. */
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
+       struct inet_bind_hashbucket *bhead;
 
        spin_lock(lock);
-       refcnt = inet_twsk_unhash(tw);
+       sk_nulls_del_node_init_rcu((struct sock *)tw);
        spin_unlock(lock);
 
        /* Disassociate with bind bucket. */
@@ -84,11 +56,9 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
                        hashinfo->bhash_size)];
 
        spin_lock(&bhead->lock);
-       refcnt += inet_twsk_bind_unhash(tw, hashinfo);
+       inet_twsk_bind_unhash(tw, hashinfo);
        spin_unlock(&bhead->lock);
 
-       BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
-       atomic_sub(refcnt, &tw->tw_refcnt);
        atomic_dec(&tw->tw_dr->tw_count);
        inet_twsk_put(tw);
 }
@@ -235,13 +205,17 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc);
  * tcp_input.c to verify this.
  */
 
-/* This is for handling early-kills of TIME_WAIT sockets. */
-void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+/* This is for handling early-kills of TIME_WAIT sockets.
+ * Warning: consumes the caller's reference.
+ * Caller should not access tw anymore.
+ */
+void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
 {
        if (del_timer_sync(&tw->tw_timer))
                inet_twsk_kill(tw);
+       inet_twsk_put(tw);
 }
-EXPORT_SYMBOL(inet_twsk_deschedule);
+EXPORT_SYMBOL(inet_twsk_deschedule_put);
 
 void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
 {
@@ -311,9 +285,8 @@ restart:
 
                        rcu_read_unlock();
                        local_bh_disable();
-                       inet_twsk_deschedule(tw);
+                       inet_twsk_deschedule_put(tw);
                        local_bh_enable();
-                       inet_twsk_put(tw);
                        goto restart_rcu;
                }
                /* If the nulls value we got at the end of this lookup is
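
The conversion collapses the old two-step teardown into one call that consumes the caller's reference. A before/after sketch of the calling convention:

	/* Before: deschedule, then drop the reference by hand. */
	inet_twsk_deschedule(tw);
	inet_twsk_put(tw);

	/* After: one call; tw must not be touched once it returns. */
	inet_twsk_deschedule_put(tw);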
index 921138f6c97c9948a7cf5e2e36b7e3dbfabc6e29..d96722ae89796ef27ec725a695b60fbce4c47fbc 100644 (file)
@@ -522,7 +522,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
        int len;
        int ihlen;
        int err;
-       int sum_truesize;
        u8 ecn;
 
        ipq_kill(qp);
@@ -590,32 +589,19 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                add_frag_mem_limit(qp->q.net, clone->truesize);
        }
 
+       skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));
 
-       sum_truesize = head->truesize;
-       for (fp = head->next; fp;) {
-               bool headstolen;
-               int delta;
-               struct sk_buff *next = fp->next;
-
-               sum_truesize += fp->truesize;
+       for (fp = head->next; fp; fp = fp->next) {
+               head->data_len += fp->len;
+               head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
-
-               if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
-                       kfree_skb_partial(fp, headstolen);
-               } else {
-                       if (!skb_shinfo(head)->frag_list)
-                               skb_shinfo(head)->frag_list = fp;
-                       head->data_len += fp->len;
-                       head->len += fp->len;
-                       head->truesize += fp->truesize;
-               }
-               fp = next;
+               head->truesize += fp->truesize;
        }
-       sub_frag_mem_limit(qp->q.net, sum_truesize);
+       sub_frag_mem_limit(qp->q.net, head->truesize);
 
        head->next = NULL;
        head->dev = dev;
index 2db4c8773c1b405da48758db66969060df2f0812..f4fc8a77aaa79dcb5156bfb2de84efdf24808713 100644 (file)
 #include <net/xfrm.h>
 #include <linux/mroute.h>
 #include <linux/netlink.h>
+#include <net/dst_metadata.h>
 
 /*
  *     Process Router Attention IP option (RFC 2113)
@@ -331,7 +332,7 @@ static int ip_rcv_finish(struct sock *sk, struct sk_buff *skb)
         *      Initialise the virtual path cache for the packet. It describes
         *      how the packet travels inside Linux networking.
         */
-       if (!skb_dst(skb)) {
+       if (!skb_valid_dst(skb)) {
                int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                               iph->tos, skb->dev);
                if (unlikely(err)) {
index 6a51a71a6c67a0f3e48523a37e3b559306885de9..5512f4e4ec1b1b629a9c1fe1c7ac2ac4ffced248 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
+#include <linux/static_key.h>
 
 #include <net/ip.h>
 #include <net/icmp.h>
@@ -190,3 +191,123 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
        return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+
+static const struct nla_policy ip_tun_policy[IP_TUN_MAX + 1] = {
+       [IP_TUN_ID]             = { .type = NLA_U64 },
+       [IP_TUN_DST]            = { .type = NLA_U32 },
+       [IP_TUN_SRC]            = { .type = NLA_U32 },
+       [IP_TUN_TTL]            = { .type = NLA_U8 },
+       [IP_TUN_TOS]            = { .type = NLA_U8 },
+       [IP_TUN_SPORT]          = { .type = NLA_U16 },
+       [IP_TUN_DPORT]          = { .type = NLA_U16 },
+       [IP_TUN_FLAGS]          = { .type = NLA_U16 },
+};
+
+static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+                             struct lwtunnel_state **ts)
+{
+       struct ip_tunnel_info *tun_info;
+       struct lwtunnel_state *new_state;
+       struct nlattr *tb[IP_TUN_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
+       if (err < 0)
+               return err;
+
+       new_state = lwtunnel_state_alloc(sizeof(*tun_info));
+       if (!new_state)
+               return -ENOMEM;
+
+       new_state->type = LWTUNNEL_ENCAP_IP;
+
+       tun_info = lwt_tun_info(new_state);
+
+       if (tb[IP_TUN_ID])
+               tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);
+
+       if (tb[IP_TUN_DST])
+               tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);
+
+       if (tb[IP_TUN_SRC])
+               tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);
+
+       if (tb[IP_TUN_TTL])
+               tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);
+
+       if (tb[IP_TUN_TOS])
+               tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);
+
+       if (tb[IP_TUN_SPORT])
+               tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);
+
+       if (tb[IP_TUN_DPORT])
+               tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);
+
+       if (tb[IP_TUN_FLAGS])
+               tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);
+
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->options = NULL;
+       tun_info->options_len = 0;
+
+       *ts = new_state;
+
+       return 0;
+}
+
+static int ip_tun_fill_encap_info(struct sk_buff *skb,
+                                 struct lwtunnel_state *lwtstate)
+{
+       struct ip_tunnel_info *tun_info = lwt_tun_info(lwtstate);
+
+       if (nla_put_u64(skb, IP_TUN_ID, tun_info->key.tun_id) ||
+           nla_put_be32(skb, IP_TUN_DST, tun_info->key.ipv4_dst) ||
+           nla_put_be32(skb, IP_TUN_SRC, tun_info->key.ipv4_src) ||
+           nla_put_u8(skb, IP_TUN_TOS, tun_info->key.ipv4_tos) ||
+           nla_put_u8(skb, IP_TUN_TTL, tun_info->key.ipv4_ttl) ||
+           nla_put_u16(skb, IP_TUN_SPORT, tun_info->key.tp_src) ||
+           nla_put_u16(skb, IP_TUN_DPORT, tun_info->key.tp_dst) ||
+           nla_put_u16(skb, IP_TUN_FLAGS, tun_info->key.tun_flags))
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ip_tun_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       return nla_total_size(8)        /* IP_TUN_ID */
+               + nla_total_size(4)     /* IP_TUN_DST */
+               + nla_total_size(4)     /* IP_TUN_SRC */
+               + nla_total_size(1)     /* IP_TUN_TOS */
+               + nla_total_size(1)     /* IP_TUN_TTL */
+               + nla_total_size(2)     /* IP_TUN_SPORT */
+               + nla_total_size(2)     /* IP_TUN_DPORT */
+               + nla_total_size(2);    /* IP_TUN_FLAGS */
+}
+
+static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
+       .build_state = ip_tun_build_state,
+       .fill_encap = ip_tun_fill_encap_info,
+       .get_encap_size = ip_tun_encap_nlsize,
+};
+
+void __init ip_tunnel_core_init(void)
+{
+       lwtunnel_encap_add_ops(&ip_tun_lwt_ops, LWTUNNEL_ENCAP_IP);
+}
+
+struct static_key ip_tunnel_metadata_cnt = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(ip_tunnel_metadata_cnt);
+
+void ip_tunnel_need_metadata(void)
+{
+       static_key_slow_inc(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_need_metadata);
+
+void ip_tunnel_unneed_metadata(void)
+{
+       static_key_slow_dec(&ip_tunnel_metadata_cnt);
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
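
The need/unneed pair is a reference-counted static-branch toggle: the first consumer patches the metadata path in, the last one patches it back out. A sketch of a consumer, with the foo_* names hypothetical:

static int foo_metadata_dev_open(void)
{
	ip_tunnel_need_metadata();	/* first caller enables the branch */
	return 0;
}

static void foo_metadata_dev_close(void)
{
	ip_tunnel_unneed_metadata();	/* last caller disables it again */
}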
index 05ff44b758dfee1e02996a3726ac63854a96ad16..e89094ab5ddb8ce2b6eb2d78a9a9046b42287bd5 100644 (file)
@@ -363,7 +363,8 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
                                                    scoped);
                rcu_read_unlock();
 
-               if (!(isk->freebind || isk->transparent || has_addr ||
+               if (!(net->ipv6.sysctl.ip_nonlocal_bind ||
+                     isk->freebind || isk->transparent || has_addr ||
                      addr_type == IPV6_ADDR_ANY))
                        return -EADDRNOTAVAIL;
 
index da5d483e236ac1e37b631c6091219fbefbe497b4..3abd9d7a3adf323bd688b1ab6dabda1248c67be1 100644 (file)
@@ -300,6 +300,8 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPACKSkippedChallenge", LINUX_MIB_TCPACKSKIPPEDCHALLENGE),
        SNMP_MIB_ITEM("TCPWinProbe", LINUX_MIB_TCPWINPROBE),
        SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+       SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+       SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
        SNMP_MIB_SENTINEL
 };
 
index e681b852ced1d0c0cde984496d832d9cf3f7fad2..908f7ef2f12a1b27f033ed3600f5a2039b1bb0a8 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/slab.h>
 #include <linux/jhash.h>
 #include <net/dst.h>
+#include <net/dst_metadata.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/icmp.h>
 #include <net/xfrm.h>
+#include <net/lwtunnel.h>
 #include <net/netevent.h>
 #include <net/rtnetlink.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/kmemleak.h>
 #endif
 #include <net/secure_seq.h>
+#include <net/ip_tunnels.h>
 
 #define RT_FL_TOS(oldflp4) \
        ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
@@ -1355,6 +1358,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
                list_del(&rt->rt_uncached);
                spin_unlock_bh(&ul->lock);
        }
+       lwtstate_put(rt->rt_lwtstate);
 }
 
 void rt_flush_dev(struct net_device *dev)
@@ -1403,6 +1407,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
 #ifdef CONFIG_IP_ROUTE_CLASSID
                rt->dst.tclassid = nh->nh_tclassid;
 #endif
+               rt->rt_lwtstate = lwtstate_get(nh->nh_lwtstate);
                if (unlikely(fnhe))
                        cached = rt_bind_exception(rt, fnhe, daddr);
                else if (!(rt->dst.flags & DST_NOCACHE))
@@ -1488,6 +1493,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
        if (our) {
                rth->dst.input= ip_local_deliver;
                rth->rt_flags |= RTCF_LOCAL;
@@ -1546,7 +1552,6 @@ static int __mkroute_input(struct sk_buff *skb,
        struct rtable *rth;
        int err;
        struct in_device *out_dev;
-       unsigned int flags = 0;
        bool do_cache;
        u32 itag = 0;
 
@@ -1610,7 +1615,7 @@ static int __mkroute_input(struct sk_buff *skb,
        }
 
        rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
-       rth->rt_flags = flags;
+       rth->rt_flags = 0;
        rth->rt_type = res->type;
        rth->rt_is_input = 1;
        rth->rt_iif     = 0;
@@ -1618,12 +1623,15 @@ static int __mkroute_input(struct sk_buff *skb,
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(in_slow_tot);
 
        rth->dst.input = ip_forward;
        rth->dst.output = ip_output;
 
        rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
+       if (lwtunnel_output_redirect(rth->rt_lwtstate))
+               rth->dst.output = lwtunnel_output;
        skb_dst_set(skb, &rth->dst);
 out:
        err = 0;
@@ -1662,6 +1670,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
        struct fib_result res;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
+       struct ip_tunnel_info *tun_info;
        struct flowi4   fl4;
        unsigned int    flags = 0;
        u32             itag = 0;
@@ -1679,6 +1688,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
           by fib_lookup.
         */
 
+       tun_info = skb_tunnel_info(skb, AF_INET);
+       if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
+               fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
+       else
+               fl4.flowi4_tun_key.tun_id = 0;
+       skb_dst_drop(skb);
+
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;
 
@@ -1792,6 +1808,8 @@ local_input:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
+       rth->rt_lwtstate = NULL;
+
        RT_CACHE_STAT_INC(in_slow_tot);
        if (res.type == RTN_UNREACHABLE) {
                rth->dst.input= ip_error;
@@ -1981,7 +1999,7 @@ add:
        rth->rt_gateway = 0;
        rth->rt_uses_gateway = 0;
        INIT_LIST_HEAD(&rth->rt_uncached);
-
+       rth->rt_lwtstate = NULL;
        RT_CACHE_STAT_INC(out_slow_tot);
 
        if (flags & RTCF_LOCAL)
@@ -2261,7 +2279,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
                rt->rt_uses_gateway = ort->rt_uses_gateway;
 
                INIT_LIST_HEAD(&rt->rt_uncached);
-
+               rt->rt_lwtstate = NULL;
                dst_free(new);
        }
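
lwtunnel_output_redirect() above gates on a flag carried in the lwtunnel state; when set, the route's dst.output is swapped for lwtunnel_output(). A standalone model of that decision (the flag name follows net/lwtunnel.h, but treat this as illustrative):

#define LWTUNNEL_STATE_OUTPUT_REDIRECT	0x1	/* as in net/lwtunnel.h */

struct lwtstate_model { unsigned int flags; };

static int output_redirect(const struct lwtstate_model *lws)
{
	return lws && (lws->flags & LWTUNNEL_STATE_OUTPUT_REDIRECT);
}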
 
index c037644eafb7caadcb196b1c8b676bbc42abdb93..fd1405d37c149309882742fb12b07331e7282a95 100644 (file)
@@ -146,7 +146,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                bictcp_update(ca, tp->snd_cwnd);
index 8c6fd3d5e40feeb3c0b422d0e697e1a674b4f576..167b6a3e1b9868c88e5553b114556ae312dfb99f 100644 (file)
@@ -264,7 +264,7 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        u32 prior_snd_cwnd;
        u32 incr;
 
-       if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+       if (tcp_in_slow_start(tp) && hystart_detect)
                tcp_cdg_hystart_update(sk);
 
        if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
index 84be008c945c654b692211b943f83e909a622516..a2ed23c595cf185cadbebcdf19e801012a64250a 100644 (file)
@@ -365,10 +365,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-       u32 cwnd = tp->snd_cwnd + acked;
+       u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
 
-       if (cwnd > tp->snd_ssthresh)
-               cwnd = tp->snd_ssthresh + 1;
        acked -= cwnd - tp->snd_cwnd;
        tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
 
@@ -413,7 +411,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In "safe" area, increase. */
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                acked = tcp_slow_start(tp, acked);
                if (!acked)
                        return;
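
The rewrite changes how slow start ends: cwnd is now clamped to exactly snd_ssthresh (previously it could finish at ssthresh + 1), and the ACKs not consumed by slow start are returned for congestion avoidance. A standalone arithmetic sketch of the new behaviour (snd_cwnd_clamp omitted):

#include <stdio.h>

typedef unsigned int u32;

static u32 min_u32(u32 a, u32 b) { return a < b ? a : b; }

/* Userspace mirror of the clamped tcp_slow_start() above. */
static u32 slow_start(u32 *snd_cwnd, u32 ssthresh, u32 acked)
{
	u32 cwnd = min_u32(*snd_cwnd + acked, ssthresh);

	acked -= cwnd - *snd_cwnd;	/* ACKs consumed by slow start */
	*snd_cwnd = cwnd;
	return acked;			/* leftover ACKs for CA mode */
}

int main(void)
{
	u32 cwnd = 8, leftover;

	/* 5 segments ACKed with ssthresh 10: cwnd grows 8 -> 10 and
	 * 3 ACKs are left over for congestion avoidance. */
	leftover = slow_start(&cwnd, 10, 5);
	printf("cwnd=%u leftover=%u\n", cwnd, leftover);
	return 0;
}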
index 06d3d665a9fd1bfda5688907a284de83697273f6..28011fb1f4a2104a34f81fc0c9fb4a4382bdadac 100644 (file)
@@ -320,7 +320,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                if (hystart && after(ack, ca->end_seq))
                        bictcp_hystart_reset(sk);
                acked = tcp_slow_start(tp, acked);
@@ -439,7 +439,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
                ca->delay_min = delay;
 
        /* hystart triggers when cwnd is larger than some threshold */
-       if (hystart && tp->snd_cwnd <= tp->snd_ssthresh &&
+       if (hystart && tcp_in_slow_start(tp) &&
            tp->snd_cwnd >= hystart_low_window)
                hystart_update(sk, delay);
 }
index 882c08aae2f58d02bb78212a4eba4d25d7e9c123..db7842495a641829a8725cb436ed2fb3aa5d53e4 100644 (file)
@@ -116,7 +116,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* Update AIMD parameters.
index 58469fff6c18fd444c95366caa04ab60965d654a..82f0d9ed60f50f27854fdb62a95281beed9df819 100644 (file)
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else {
                /* In dangerous area, increase slowly.
index f963b274f2b0436755ebe8bb5586b1ec9682c336..083831e359df92ca9ba0fe7dd5a7a76fe41a94b0 100644 (file)
@@ -112,7 +112,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
        rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
-       if (tp->snd_cwnd < tp->snd_ssthresh) {
+       if (tcp_in_slow_start(tp)) {
                /*
                 * slow start
                 *      INC = 2^RHO - 1
index f71002e4db0ba7fe8dfe35bb2196bbaae751ed59..2ab9bbb6faffb799560df98b093d4cbc1207d816 100644 (file)
@@ -268,7 +268,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                return;
 
        /* In slow start */
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 
        else {
index 728f5b3d3c64197bb526240a078744d5a950c8ea..4e4d6bcd0ca973226b9ebcf233f777ab3c68be7e 100644 (file)
@@ -109,6 +109,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define FLAG_SYN_ACKED         0x10 /* This ACK acknowledged SYN.              */
 #define FLAG_DATA_SACKED       0x20 /* New SACK.                               */
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
+#define FLAG_LOST_RETRANS      0x80 /* This ACK marks some retransmission lost */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ORIG_SACK_ACKED   0x200 /* Never retransmitted data are (s)acked  */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -196,11 +197,13 @@ static void tcp_enter_quickack_mode(struct sock *sk)
  * and the session is not interactive.
  */
 
-static inline bool tcp_in_quickack_mode(const struct sock *sk)
+static bool tcp_in_quickack_mode(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
+       const struct dst_entry *dst = __sk_dst_get(sk);
 
-       return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
+       return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
+               (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong);
 }
 
 static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
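
The new predicate consults a per-destination RTAX_QUICKACK metric before the pingpong heuristic, so quick ACKs can be forced on a per-route basis. A minimal standalone model of the decision:

/* dst_quickack mirrors dst_metric(dst, RTAX_QUICKACK); quick and
 * pingpong mirror icsk->icsk_ack.  Pure model, for illustration. */
static int in_quickack_mode(int dst_quickack, int quick, int pingpong)
{
	return dst_quickack || (quick && !pingpong);
}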
@@ -1037,7 +1040,7 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACKs.
  */
-static void tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -1078,7 +1081,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
                if (after(received_upto, ack_seq)) {
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                        tp->retrans_out -= tcp_skb_pcount(skb);
-
+                       *flag |= FLAG_LOST_RETRANS;
                        tcp_skb_mark_lost_uncond_verify(tp, skb);
                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
@@ -1818,7 +1821,7 @@ advance_sp:
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
                tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-       tcp_mark_lost_retrans(sk);
+       tcp_mark_lost_retrans(sk, &state->flag);
        tcp_verify_left_out(tp);
 out:
 
@@ -2474,15 +2477,14 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
        return false;
 }
 
-/* The cwnd reduction in CWR and Recovery use the PRR algorithm
- * https://datatracker.ietf.org/doc/draft-ietf-tcpm-proportional-rate-reduction/
+/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
  * It computes the number of packets to send (sndcnt) based on packets newly
  * delivered:
  *   1) If the packets in flight is larger than ssthresh, PRR spreads the
  *     cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *     losses and/or application stalls), do not perform any further cwnd
- *     reductions, but instead slow start up to ssthresh.
+ *   2) Otherwise PRR uses packet conservation to send as much as delivered.
+ *      But when the retransmits are acked without further losses, PRR
+ *      slow starts cwnd up to ssthresh to speed up the recovery.
  */
 static void tcp_init_cwnd_reduction(struct sock *sk)
 {
@@ -2499,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 }
 
 static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-                              int fast_rexmit)
+                              int fast_rexmit, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
@@ -2508,16 +2510,18 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
                                 (tp->packets_out - tp->sacked_out);
 
        tp->prr_delivered += newly_acked_sacked;
-       if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+       if (delta < 0) {
                u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
                               tp->prior_cwnd - 1;
                sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-       } else {
+       } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
+                  !(flag & FLAG_LOST_RETRANS)) {
                sndcnt = min_t(int, delta,
                               max_t(int, tp->prr_delivered - tp->prr_out,
                                     newly_acked_sacked) + 1);
+       } else {
+               sndcnt = min(delta, newly_acked_sacked);
        }
-
        sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
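
To make the three-way branch above easier to follow, here is a self-contained user-space rework of the sndcnt computation from this hunk. All names are local stand-ins (the two macros mimic FLAG_RETRANS_DATA_ACKED and FLAG_LOST_RETRANS); it illustrates the RFC 6937 logic as wired up here, it is not the kernel code itself.

    #include <stdio.h>

    #define RETRANS_DATA_ACKED 0x1   /* stand-in for FLAG_RETRANS_DATA_ACKED */
    #define LOST_RETRANS       0x2   /* stand-in for FLAG_LOST_RETRANS */

    /* Returns the new cwnd, mirroring the three branches above: proportional
     * reduction while above ssthresh, slow start back toward ssthresh when
     * retransmits are delivered cleanly, packet conservation otherwise. */
    static int prr_cwnd(int ssthresh, int in_flight, int prr_delivered, int prr_out,
                        int prior_cwnd, int newly_acked_sacked, int flag,
                        int fast_rexmit)
    {
        int delta = ssthresh - in_flight;
        int sndcnt;

        if (delta < 0) {
            long long dividend = (long long)ssthresh * prr_delivered + prior_cwnd - 1;
            sndcnt = (int)(dividend / prior_cwnd) - prr_out;
        } else if ((flag & RETRANS_DATA_ACKED) && !(flag & LOST_RETRANS)) {
            int boost = prr_delivered - prr_out;
            if (boost < newly_acked_sacked)
                boost = newly_acked_sacked;
            sndcnt = delta < boost + 1 ? delta : boost + 1;
        } else {
            sndcnt = delta < newly_acked_sacked ? delta : newly_acked_sacked;
        }

        if (fast_rexmit && sndcnt < 1)
            sndcnt = 1;
        return in_flight + sndcnt;
    }

    int main(void)
    {
        /* 2 of 20 packets delivered, 18 still in flight, ssthresh 10:
         * proportional reduction lets cwnd shrink toward ssthresh. */
        printf("cwnd -> %d\n", prr_cwnd(10, 18, 2, 0, 20, 2, 0, 0));

        /* Below ssthresh, retransmits acked and none lost: slow start back up. */
        printf("cwnd -> %d\n", prr_cwnd(10, 6, 8, 4, 20, 2, RETRANS_DATA_ACKED, 0));
        return 0;
    }
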
@@ -2578,7 +2582,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
        } else {
-               tcp_cwnd_reduction(sk, prior_unsacked, 0);
+               tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
        }
 }
 
@@ -2588,6 +2592,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 
        icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
        icsk->icsk_mtup.probe_size = 0;
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2607,6 +2612,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
        icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
        icsk->icsk_mtup.probe_size = 0;
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2675,7 +2681,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
        tp->prior_ssthresh = 0;
        tcp_init_undo(tp);
 
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+       if (!tcp_in_cwnd_reduction(sk)) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
                tcp_init_cwnd_reduction(sk);
@@ -2735,7 +2741,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
 
 /* Undo during fast recovery after partial ACK. */
 static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-                                const int prior_unsacked)
+                                const int prior_unsacked, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2751,7 +2757,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
                 * mark more packets lost or retransmit more.
                 */
                if (tp->retrans_out) {
-                       tcp_cwnd_reduction(sk, prior_unsacked, 0);
+                       tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
                        return true;
                }
 
@@ -2838,7 +2844,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
                } else {
-                       if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+                       if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
                                return;
                        /* Partial ACK arrived. Force fast retransmit. */
                        do_lost = tcp_is_reno(tp) ||
@@ -2851,9 +2857,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack);
-               if (icsk->icsk_ca_state != TCP_CA_Open)
+               if (icsk->icsk_ca_state != TCP_CA_Open &&
+                   !(flag & FLAG_LOST_RETRANS))
                        return;
-               /* Fall through to processing in Open state. */
+               /* Change state if cwnd is undone or retransmits are lost */
        default:
                if (tcp_is_reno(tp)) {
                        if (flag & FLAG_SND_UNA_ADVANCED)
@@ -2888,7 +2895,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit);
+       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3562,10 +3569,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                                    &sack_state);
        acked -= tp->packets_out;
 
-       /* Advance cwnd if state allows */
-       if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked);
-
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
                tcp_fastretrans_alert(sk, acked, prior_unsacked,
@@ -3574,6 +3577,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
 
+       /* Advance cwnd if state allows */
+       if (tcp_may_raise_cwnd(sk, flag))
+               tcp_cong_avoid(sk, ack, acked);
+
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
                struct dst_entry *dst = __sk_dst_get(sk);
                if (dst)
@@ -3947,7 +3954,6 @@ void tcp_reset(struct sock *sk)
 static void tcp_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       const struct dst_entry *dst;
 
        inet_csk_schedule_ack(sk);
 
@@ -3959,9 +3965,7 @@ static void tcp_fin(struct sock *sk)
        case TCP_ESTABLISHED:
                /* Move to CLOSE_WAIT */
                tcp_set_state(sk, TCP_CLOSE_WAIT);
-               dst = __sk_dst_get(sk);
-               if (!dst || !dst_metric(dst, RTAX_QUICKACK))
-                       inet_csk(sk)->icsk_ack.pingpong = 1;
+               inet_csk(sk)->icsk_ack.pingpong = 1;
                break;
 
        case TCP_CLOSE_WAIT:
index d7d4c2b79cf2f516f9e3f62c6fe4415e9bc137a0..d27eb549ced6b4bba76fcd3a4286c8ab0b41478f 100644 (file)
@@ -222,7 +222,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (err)
                goto failure;
 
-       inet_set_txhash(sk);
+       sk_set_txhash(sk);
 
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
@@ -1277,7 +1277,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
-       inet_set_txhash(newsk);
+       sk_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
@@ -1683,8 +1683,7 @@ do_time_wait:
                                                        iph->daddr, th->dest,
                                                        inet_iif(skb));
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                        goto process;
                }
index a51d63a43e33af5fc751e4f0f3369b9394776975..b3d64f61d922e1ec10aa31b4e19ea0fb6c6876be 100644 (file)
@@ -461,7 +461,7 @@ void tcp_update_metrics(struct sock *sk)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
-       } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+       } else if (!tcp_in_slow_start(tp) &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
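
This hunk, and the tcp_scalable/tcp_vegas/tcp_veno hunks further down, replace open-coded snd_cwnd/snd_ssthresh comparisons with a tcp_in_slow_start() helper whose definition is not part of this diff. The sketch below shows the comparison these call sites are assumed to encapsulate, using an invented toy_tcp struct; several of the replaced checks used "<=", so treat the exact boundary condition as an assumption.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_tcp {
        uint32_t snd_cwnd;
        uint32_t snd_ssthresh;
    };

    /* Assumed behaviour of the helper: still in slow start while cwnd has not
     * reached ssthresh. */
    static bool in_slow_start(const struct toy_tcp *tp)
    {
        return tp->snd_cwnd < tp->snd_ssthresh;
    }

    int main(void)
    {
        struct toy_tcp tp = { .snd_cwnd = 10, .snd_ssthresh = 16 };

        printf("in slow start: %d\n", in_slow_start(&tp));
        tp.snd_cwnd = 16;
        printf("in slow start: %d\n", in_slow_start(&tp));
        return 0;
    }
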
index 4bc00cb79e603553076adf750712377586f4b2fb..6d8795b066aca708df47de3c9211f36bee5eb1d4 100644 (file)
@@ -147,8 +147,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
 kill_with_rst:
-                       inet_twsk_deschedule(tw);
-                       inet_twsk_put(tw);
+                       inet_twsk_deschedule_put(tw);
                        return TCP_TW_RST;
                }
 
@@ -198,8 +197,7 @@ kill_with_rst:
                         */
                        if (sysctl_tcp_rfc1337 == 0) {
 kill:
-                               inet_twsk_deschedule(tw);
-                               inet_twsk_put(tw);
+                               inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
index b1c218df2c855bc56594ffdd86d75ef5e146731a..7d1efa762b75b04e982c14da36c12595b38dc880 100644 (file)
@@ -163,7 +163,6 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;
-       const struct dst_entry *dst = __sk_dst_get(sk);
 
        if (sysctl_tcp_slow_start_after_idle &&
            (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
@@ -174,9 +173,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        /* If it is a reply for ato after last received
         * packet, enter pingpong mode.
         */
-       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato &&
-           (!dst || !dst_metric(dst, RTAX_QUICKACK)))
-                       icsk->icsk_ack.pingpong = 1;
+       if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
+               icsk->icsk_ack.pingpong = 1;
 }
 
 /* Account for an ACK we sent. */
@@ -1776,7 +1774,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                goto send_now;
 
-       if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_CWR)))
+       if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
        /* Avoid bursty behavior by allowing defer
index 333bcb2415ffca51e06f3042ae3d94b8e21c0725..bf5ea9e9bbc1ed3c07c03f9db69b9848cf83ec8e 100644 (file)
@@ -22,7 +22,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
        if (!tcp_is_cwnd_limited(sk))
                return;
 
-       if (tp->snd_cwnd <= tp->snd_ssthresh)
+       if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
        else
                tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
index 5b752f58a90063e7468b11f2853c7c006b679e60..7149ebc820c7d87afef856ce641ac63678abafa3 100644 (file)
@@ -649,4 +649,3 @@ void tcp_init_xmit_timers(struct sock *sk)
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
 }
-EXPORT_SYMBOL(tcp_init_xmit_timers);
index a6cea1d5e20d47f06eab95f3344a3e3b7c44da89..13951c4087d407b72cb5bc2ee75822203244e3f3 100644 (file)
@@ -225,7 +225,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                         */
                        diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
 
-                       if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) {
+                       if (diff > gamma && tcp_in_slow_start(tp)) {
                                /* Going too fast. Time to slow down
                                 * and switch to congestion avoidance.
                                 */
@@ -240,7 +240,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                                tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
                                tp->snd_ssthresh = tcp_vegas_ssthresh(tp);
 
-                       } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
+                       } else if (tcp_in_slow_start(tp)) {
                                /* Slow start.  */
                                tcp_slow_start(tp, acked);
                        } else {
@@ -281,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
                vegas->minRTT = 0x7fffffff;
        }
        /* Use normal slow start */
-       else if (tp->snd_cwnd <= tp->snd_ssthresh)
+       else if (tcp_in_slow_start(tp))
                tcp_slow_start(tp, acked);
 }
 
index 112151eeee45bff0c37ac92d78d165ba92bd4d0a..0d094b995cd96f8c5150daf586cdde0f495843f5 100644 (file)
@@ -150,7 +150,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
                veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
-               if (tp->snd_cwnd <= tp->snd_ssthresh) {
+               if (tcp_in_slow_start(tp)) {
                        /* Slow start.  */
                        tcp_slow_start(tp, acked);
                } else {
index 438a73aa777cf560f38a87801b03b8ce20a315b1..643f61339e7b4fc9d4dcba75c4bb772c99d39292 100644 (file)
@@ -5,16 +5,15 @@
 #   IPv6 as module will cause a CRASH if you try to unload it
 menuconfig IPV6
        tristate "The IPv6 protocol"
-       default m
+       default y
        ---help---
-         This is complemental support for the IP version 6.
-         You will still be able to do traditional IPv4 networking as well.
+         Support for IP version 6 (IPv6).
 
          For general information about IPv6, see
          <https://en.wikipedia.org/wiki/IPv6>.
-         For Linux IPv6 development information, see <http://www.linux-ipv6.org>.
-         For specific information about IPv6 under Linux, read the HOWTO at
-         <http://www.bieringer.de/linux/IPv6/>.
+         For specific information about IPv6 under Linux, see
+         Documentation/networking/ipv6.txt and read the HOWTO at
+         <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
 
          To compile this protocol support as a module, choose M here: the 
          module will be called ipv6.
index 21c2c818df3b8379226555268ef526c08553d00d..53e3a9d756b0d804e873c80a820383b756a0624b 100644 (file)
@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_from_local   = 0,
+       .accept_ra_min_hop_limit= 1,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -211,7 +212,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .accept_ra_mtu          = 1,
        .stable_secret          = {
                .initialized = false,
-       }
+       },
+       .use_oif_addrs_only     = 0,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -236,6 +238,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
        .accept_ra_from_local   = 0,
+       .accept_ra_min_hop_limit= 1,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -253,6 +256,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .stable_secret          = {
                .initialized = false,
        },
+       .use_oif_addrs_only     = 0,
 };
 
 /* Check if a valid qdisc is available */
@@ -1358,15 +1362,96 @@ out:
        return ret;
 }
 
+static int __ipv6_dev_get_saddr(struct net *net,
+                               struct ipv6_saddr_dst *dst,
+                               struct inet6_dev *idev,
+                               struct ipv6_saddr_score *scores,
+                               int hiscore_idx)
+{
+       struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
+               int i;
+
+               /*
+                * - Tentative Address (RFC2462 section 5.4)
+                *  - A tentative address is not considered
+                *    "assigned to an interface" in the traditional
+                *    sense, unless it is also flagged as optimistic.
+                * - Candidate Source Address (section 4)
+                *  - In any case, anycast addresses, multicast
+                *    addresses, and the unspecified address MUST
+                *    NOT be included in a candidate set.
+                */
+               if ((score->ifa->flags & IFA_F_TENTATIVE) &&
+                   (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
+                       continue;
+
+               score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+
+               if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
+                            score->addr_type & IPV6_ADDR_MULTICAST)) {
+                       net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
+                                           idev->dev->name);
+                       continue;
+               }
+
+               score->rule = -1;
+               bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
+
+               for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
+                       int minihiscore, miniscore;
+
+                       minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
+                       miniscore = ipv6_get_saddr_eval(net, score, dst, i);
+
+                       if (minihiscore > miniscore) {
+                               if (i == IPV6_SADDR_RULE_SCOPE &&
+                                   score->scopedist > 0) {
+                                       /*
+                                        * special case:
+                                        * each remaining entry
+                                        * has too small (not enough)
+                                        * scope, because ifa entries
+                                        * are sorted by their scope
+                                        * values.
+                                        */
+                                       goto out;
+                               }
+                               break;
+                       } else if (minihiscore < miniscore) {
+                               if (hiscore->ifa)
+                                       in6_ifa_put(hiscore->ifa);
+
+                               in6_ifa_hold(score->ifa);
+
+                               swap(hiscore, score);
+                               hiscore_idx = 1 - hiscore_idx;
+
+                               /* restore our iterator */
+                               score->ifa = hiscore->ifa;
+
+                               break;
+                       }
+               }
+       }
+out:
+       read_unlock_bh(&idev->lock);
+       return hiscore_idx;
+}
+
 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
                       const struct in6_addr *daddr, unsigned int prefs,
                       struct in6_addr *saddr)
 {
-       struct ipv6_saddr_score scores[2],
-                               *score = &scores[0], *hiscore = &scores[1];
+       struct ipv6_saddr_score scores[2], *hiscore;
        struct ipv6_saddr_dst dst;
+       struct inet6_dev *idev;
        struct net_device *dev;
        int dst_type;
+       bool use_oif_addr = false;
+       int hiscore_idx = 0;
 
        dst_type = __ipv6_addr_type(daddr);
        dst.addr = daddr;
@@ -1375,105 +1460,50 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
        dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
        dst.prefs = prefs;
 
-       hiscore->rule = -1;
-       hiscore->ifa = NULL;
+       scores[hiscore_idx].rule = -1;
+       scores[hiscore_idx].ifa = NULL;
 
        rcu_read_lock();
 
-       for_each_netdev_rcu(net, dev) {
-               struct inet6_dev *idev;
-
-               /* Candidate Source Address (section 4)
-                *  - multicast and link-local destination address,
-                *    the set of candidate source address MUST only
-                *    include addresses assigned to interfaces
-                *    belonging to the same link as the outgoing
-                *    interface.
-                * (- For site-local destination addresses, the
-                *    set of candidate source addresses MUST only
-                *    include addresses assigned to interfaces
-                *    belonging to the same site as the outgoing
-                *    interface.)
-                */
-               if (((dst_type & IPV6_ADDR_MULTICAST) ||
-                    dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL) &&
-                   dst.ifindex && dev->ifindex != dst.ifindex)
-                       continue;
-
-               idev = __in6_dev_get(dev);
-               if (!idev)
-                       continue;
-
-               read_lock_bh(&idev->lock);
-               list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
-                       int i;
-
-                       /*
-                        * - Tentative Address (RFC2462 section 5.4)
-                        *  - A tentative address is not considered
-                        *    "assigned to an interface" in the traditional
-                        *    sense, unless it is also flagged as optimistic.
-                        * - Candidate Source Address (section 4)
-                        *  - In any case, anycast addresses, multicast
-                        *    addresses, and the unspecified address MUST
-                        *    NOT be included in a candidate set.
-                        */
-                       if ((score->ifa->flags & IFA_F_TENTATIVE) &&
-                           (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
-                               continue;
-
-                       score->addr_type = __ipv6_addr_type(&score->ifa->addr);
+       /* Candidate Source Address (section 4)
+        *  - multicast and link-local destination address,
+        *    the set of candidate source address MUST only
+        *    include addresses assigned to interfaces
+        *    belonging to the same link as the outgoing
+        *    interface.
+        * (- For site-local destination addresses, the
+        *    set of candidate source addresses MUST only
+        *    include addresses assigned to interfaces
+        *    belonging to the same site as the outgoing
+        *    interface.)
+        *  - "It is RECOMMENDED that the candidate source addresses
+        *    be the set of unicast addresses assigned to the
+        *    interface that will be used to send to the destination
+        *    (the 'outgoing' interface)." (RFC 6724)
+        */
+       if (dst_dev) {
+               idev = __in6_dev_get(dst_dev);
+               if ((dst_type & IPV6_ADDR_MULTICAST) ||
+                   dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
+                   (idev && idev->cnf.use_oif_addrs_only)) {
+                       use_oif_addr = true;
+               }
+       }
 
-                       if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
-                                    score->addr_type & IPV6_ADDR_MULTICAST)) {
-                               net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
-                                                   dev->name);
+       if (use_oif_addr) {
+               if (idev)
+                       hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
+       } else {
+               for_each_netdev_rcu(net, dev) {
+                       idev = __in6_dev_get(dev);
+                       if (!idev)
                                continue;
-                       }
-
-                       score->rule = -1;
-                       bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
-
-                       for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
-                               int minihiscore, miniscore;
-
-                               minihiscore = ipv6_get_saddr_eval(net, hiscore, &dst, i);
-                               miniscore = ipv6_get_saddr_eval(net, score, &dst, i);
-
-                               if (minihiscore > miniscore) {
-                                       if (i == IPV6_SADDR_RULE_SCOPE &&
-                                           score->scopedist > 0) {
-                                               /*
-                                                * special case:
-                                                * each remaining entry
-                                                * has too small (not enough)
-                                                * scope, because ifa entries
-                                                * are sorted by their scope
-                                                * values.
-                                                */
-                                               goto try_nextdev;
-                                       }
-                                       break;
-                               } else if (minihiscore < miniscore) {
-                                       if (hiscore->ifa)
-                                               in6_ifa_put(hiscore->ifa);
-
-                                       in6_ifa_hold(score->ifa);
-
-                                       swap(hiscore, score);
-
-                                       /* restore our iterator */
-                                       score->ifa = hiscore->ifa;
-
-                                       break;
-                               }
-                       }
+                       hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
                }
-try_nextdev:
-               read_unlock_bh(&idev->lock);
        }
        rcu_read_unlock();
 
+       hiscore = &scores[hiscore_idx];
        if (!hiscore->ifa)
                return -EADDRNOTAVAIL;
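
The refactor above moves the per-device candidate walk into __ipv6_dev_get_saddr() and keeps two score slots, swapping which slot is the current best whenever a later rule scores higher. The standalone sketch below illustrates only that rule-by-rule comparison strategy, with made-up candidates and scores; it is not the RFC 6724 rule set itself.

    #include <stdio.h>

    #define RULE_MAX 3   /* the kernel walks IPV6_SADDR_RULE_MAX rules; 3 here */

    struct candidate {
        const char *name;
        int score[RULE_MAX];   /* higher is better; rules ordered by priority */
    };

    /* The first rule that differs decides the winner; ties fall through to
     * the next rule, mirroring the minihiscore/miniscore loop above. */
    static int pick_best(const struct candidate *c, int n)
    {
        int best = 0;

        for (int i = 1; i < n; i++) {
            for (int r = 0; r < RULE_MAX; r++) {
                if (c[best].score[r] > c[i].score[r])
                    break;              /* current best already wins */
                if (c[best].score[r] < c[i].score[r]) {
                    best = i;           /* candidate takes over as best */
                    break;
                }
            }
        }
        return best;
    }

    int main(void)
    {
        struct candidate c[] = {
            { "fe80::1",     { 2, 1, 0 } },
            { "2001:db8::1", { 2, 2, 0 } },
            { "2001:db8::2", { 2, 2, 1 } },
        };

        printf("selected source: %s\n", c[pick_best(c, 3)].name);
        return 0;
    }
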
 
@@ -4560,6 +4590,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
        array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
        array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
+       array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
        array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
        array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
@@ -4586,6 +4617,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
        array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
        /* we omit DEVCONF_STABLE_SECRET for now */
+       array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -5455,6 +5487,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
+               {
+                       .procname       = "accept_ra_min_hop_limit",
+                       .data           = &ipv6_devconf.accept_ra_min_hop_limit,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        .procname       = "accept_ra_pinfo",
                        .data           = &ipv6_devconf.accept_ra_pinfo,
@@ -5584,6 +5623,14 @@ static struct addrconf_sysctl_table
                        .mode           = 0600,
                        .proc_handler   = addrconf_sysctl_stable_secret,
                },
+               {
+                       .procname       = "use_oif_addrs_only",
+                       .data           = &ipv6_devconf.use_oif_addrs_only,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+
+               },
                {
                        /* sentinel */
                }
index ca09bf49ac6806b399dba51399f84e47590cb9ed..bfa941fc1165002903b5a0364e5584b075469e2e 100644 (file)
@@ -107,7 +107,16 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
 }
 EXPORT_SYMBOL(inet6addr_notifier_call_chain);
 
-const struct ipv6_stub *ipv6_stub __read_mostly;
+static int eafnosupport_ipv6_dst_lookup(struct net *net, struct sock *u1,
+                                       struct dst_entry **u2,
+                                       struct flowi6 *u3)
+{
+       return -EAFNOSUPPORT;
+}
+
+const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
+       .ipv6_dst_lookup = eafnosupport_ipv6_dst_lookup,
+};
 EXPORT_SYMBOL_GPL(ipv6_stub);
 
 /* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
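
The ipv6_stub change above replaces a NULL default with a pointer to a file-scope compound literal whose lookup member fails with -EAFNOSUPPORT, so callers never need to NULL-check the stub. A minimal user-space sketch of that pattern follows; the ops layout and names are invented.

    #include <errno.h>
    #include <stdio.h>

    struct proto_stub {
        int (*dst_lookup)(void *net, void *sk);
    };

    static int eafnosupport_dst_lookup(void *net, void *sk)
    {
        (void)net; (void)sk;
        return -EAFNOSUPPORT;
    }

    /* File-scope compound literal: the pointer is never NULL, only the
     * behaviour changes once a real implementation is installed. */
    static const struct proto_stub *stub = &(struct proto_stub) {
        .dst_lookup = eafnosupport_dst_lookup,
    };

    int main(void)
    {
        printf("dst_lookup -> %d\n", stub->dst_lookup(NULL, NULL));
        return 0;
    }
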
index 7de52b65173fa6a1b344b13e67106ad39591ed06..44bb66bde0e2d97308c3c68a8d6b225ce04d08a8 100644 (file)
@@ -197,6 +197,7 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
+       np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
        sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
        /* Init the ipv4 part of the socket since we can have sockets
@@ -342,7 +343,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                         */
                        v4addr = LOOPBACK4_IPV6;
                        if (!(addr_type & IPV6_ADDR_MULTICAST)) {
-                               if (!(inet->freebind || inet->transparent) &&
+                               if (!net->ipv6.sysctl.ip_nonlocal_bind &&
+                                   !(inet->freebind || inet->transparent) &&
                                    !ipv6_chk_addr(net, &addr->sin6_addr,
                                                   dev, 0)) {
                                        err = -EADDRNOTAVAIL;
@@ -679,8 +681,8 @@ bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
        const struct ipv6_pinfo *np = inet6_sk(sk);
 
        if (np->rxopt.all) {
-               if ((opt->hop && (np->rxopt.bits.hopopts ||
-                                 np->rxopt.bits.ohopopts)) ||
+               if (((opt->flags & IP6SKB_HOPBYHOP) &&
+                    (np->rxopt.bits.hopopts || np->rxopt.bits.ohopopts)) ||
                    (ip6_flowinfo((struct ipv6hdr *) skb_network_header(skb)) &&
                     np->rxopt.bits.rxflow) ||
                    (opt->srcrt && (np->rxopt.bits.srcrt ||
@@ -766,10 +768,10 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
-       net->ipv6.sysctl.auto_flowlabels = 0;
+       net->ipv6.sysctl.auto_flowlabels = IP6_DEFAULT_AUTO_FLOW_LABELS;
        net->ipv6.sysctl.idgen_retries = 3;
        net->ipv6.sysctl.idgen_delay = 1 * HZ;
-       net->ipv6.sysctl.flowlabel_state_ranges = 1;
+       net->ipv6.sysctl.flowlabel_state_ranges = 0;
        atomic_set(&net->ipv6.fib6_sernum, 1);
 
        err = ipv6_init_mibs(net);
index b10a88986a9896a4a33f8a4139e41d3f1013a41a..9aadd57808a515dda6edbf4b784aae2179604628 100644 (file)
@@ -199,7 +199,7 @@ ipv4_connected:
                      NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
-       ip6_set_txhash(sk);
+       sk_set_txhash(sk);
 out:
        fl6_sock_release(flowlabel);
        return err;
@@ -568,8 +568,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
        }
 
        /* HbH is allowed only once */
-       if (np->rxopt.bits.hopopts && opt->hop) {
-               u8 *ptr = nh + opt->hop;
+       if (np->rxopt.bits.hopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+               u8 *ptr = nh + sizeof(struct ipv6hdr);
                put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
 
@@ -630,8 +630,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
                int hlim = ipv6_hdr(skb)->hop_limit;
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim);
        }
-       if (np->rxopt.bits.ohopopts && opt->hop) {
-               u8 *ptr = nh + opt->hop;
+       if (np->rxopt.bits.ohopopts && (opt->flags & IP6SKB_HOPBYHOP)) {
+               u8 *ptr = nh + sizeof(struct ipv6hdr);
                put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr);
        }
        if (np->rxopt.bits.odstopts && opt->dst0) {
index a7bbbe45570b287eb05b9f13d0a73a830b767dd2..ce203b0402bea3b16deb34b4835cd2e89e94f899 100644 (file)
@@ -632,7 +632,7 @@ int ipv6_parse_hopopts(struct sk_buff *skb)
                return -1;
        }
 
-       opt->hop = sizeof(struct ipv6hdr);
+       opt->flags |= IP6SKB_HOPBYHOP;
        if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
                skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
                opt = IP6CB(skb);
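
The datagram and exthdrs hunks above switch from storing a Hop-by-Hop offset in opt->hop to setting an IP6SKB_HOPBYHOP flag, relying on the fact that a Hop-by-Hop options header can only sit directly after the fixed 40-byte IPv6 header. The sketch below shows just the length arithmetic (the (ptr[1]+1)<<3 expression) on a dummy buffer; the constant and names are local stand-ins.

    #include <stdint.h>
    #include <stdio.h>

    #define IPV6_HDR_LEN 40   /* stand-in for sizeof(struct ipv6hdr) */

    /* The second byte of an extension header encodes its length in 8-octet
     * units, not counting the first 8 octets - hence (ptr[1] + 1) << 3. */
    static size_t hbh_total_len(const uint8_t *pkt)
    {
        const uint8_t *hbh = pkt + IPV6_HDR_LEN;   /* HbH follows the IPv6 header */

        return (size_t)(hbh[1] + 1) << 3;
    }

    int main(void)
    {
        uint8_t pkt[64] = { 0 };

        pkt[IPV6_HDR_LEN + 1] = 1;   /* hdr ext len = 1 -> 16 bytes in total */
        printf("Hop-by-Hop header spans %zu bytes\n", hbh_total_len(pkt));
        return 0;
    }
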
index 713d7434c9112432f800f377925ca68534094c34..6c2b2132c8d328e4d947c3b0b8216ea40f582f90 100644 (file)
@@ -329,7 +329,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
        struct flowi6 fl2;
        int err;
 
-       err = ip6_dst_lookup(sk, &dst, fl6);
+       err = ip6_dst_lookup(net, sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
 
@@ -361,7 +361,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net,
        if (err)
                goto relookup_failed;
 
-       err = ip6_dst_lookup(sk, &dst2, &fl2);
+       err = ip6_dst_lookup(net, sk, &dst2, &fl2);
        if (err)
                goto relookup_failed;
 
@@ -591,7 +591,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
-       err = ip6_dst_lookup(sk, &dst, &fl6);
+       err = ip6_dst_lookup(net, sk, &dst, &fl6);
        if (err)
                goto out;
        dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0);
index b4fd96de97e61627003eff220e10bdd05a899e28..6ac8dad0138a6b41395f306bffc2d9b47d8d91bc 100644 (file)
@@ -207,7 +207,6 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        struct sock *sk2;
        const struct hlist_nulls_node *node;
        struct inet_timewait_sock *tw = NULL;
-       int twrefcnt = 0;
 
        spin_lock(lock);
 
@@ -234,21 +233,17 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
        WARN_ON(!sk_unhashed(sk));
        __sk_nulls_add_node_rcu(sk, &head->chain);
        if (tw) {
-               twrefcnt = inet_twsk_unhash(tw);
+               sk_nulls_del_node_init_rcu((struct sock *)tw);
                NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
        }
        spin_unlock(lock);
-       if (twrefcnt)
-               inet_twsk_put(tw);
        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 
        if (twp) {
                *twp = tw;
        } else if (tw) {
                /* Silly. Should hash-dance instead... */
-               inet_twsk_deschedule(tw);
-
-               inet_twsk_put(tw);
+               inet_twsk_deschedule_put(tw);
        }
        return 0;
 
index 55d19861ab20f4a91b6b289be7ca3b0250df4531..5693b5eb84820fceb7feb2f87345cd38b2613c6e 100644 (file)
@@ -32,6 +32,7 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/lwtunnel.h>
 
 #include <net/ip6_fib.h>
 #include <net/ip6_route.h>
@@ -177,6 +178,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 static void rt6_release(struct rt6_info *rt)
 {
        if (atomic_dec_and_test(&rt->rt6i_ref)) {
+               lwtstate_put(rt->rt6i_lwtstate);
                rt6_free_pcpu(rt);
                dst_free(&rt->dst);
        }
index a38d3ac0f18f6e631e3a17904bf617f7a0dfe28a..34f121812a1484e0ff23ea337ee39f35424a9628 100644 (file)
@@ -728,7 +728,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         */
        ipv6h = ipv6_hdr(skb);
        ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1182,7 +1182,8 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
 
        ip6_flow_hdr(ipv6h, 0,
                     ip6_make_flowlabel(dev_net(dev), skb,
-                                       t->fl.u.ip6.flowlabel, false));
+                                       t->fl.u.ip6.flowlabel, true,
+                                       &t->fl.u.ip6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
index 57990c929cd8156ebac52c648deb50fd3f74ab82..adba03ac7ce9671f6dba419d163672b9520743bd 100644 (file)
@@ -45,6 +45,7 @@
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/inet_ecn.h>
+#include <net/dst_metadata.h>
 
 int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
 {
@@ -55,7 +56,7 @@ int ip6_rcv_finish(struct sock *sk, struct sk_buff *skb)
                if (ipprot && ipprot->early_demux)
                        ipprot->early_demux(skb);
        }
-       if (!skb_dst(skb))
+       if (!skb_valid_dst(skb))
                ip6_route_input(skb);
 
        return dst_input(skb);
@@ -98,7 +99,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
         * arrived via the sending interface (ethX), because of the
         * nature of scoping architecture. --yoshfuji
         */
-       IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
+       IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
 
        if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
                goto err;
index d5f7716662dbc361e93ebc443e72ee5fb7343b10..26ea4793074004d0af1026bb378860b53baa0ad2 100644 (file)
@@ -207,7 +207,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                hlimit = ip6_dst_hoplimit(dst);
 
        ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                                    np->autoflowlabel));
+                                                    np->autoflowlabel, fl6));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -881,10 +881,9 @@ out:
        return dst;
 }
 
-static int ip6_dst_lookup_tail(struct sock *sk,
+static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
 {
-       struct net *net = sock_net(sk);
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        struct neighbour *n;
        struct rt6_info *rt;
@@ -994,10 +993,11 @@ out_err_release:
  *
  *     It returns zero on success, or a standard errno code on error.
  */
-int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
+int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
+                  struct flowi6 *fl6)
 {
        *dst = NULL;
-       return ip6_dst_lookup_tail(sk, dst, fl6);
+       return ip6_dst_lookup_tail(net, sk, dst, fl6);
 }
 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
@@ -1018,11 +1018,13 @@ struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
        struct dst_entry *dst = NULL;
        int err;
 
-       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
                fl6->daddr = *final_dst;
+       if (!fl6->flowi6_oif)
+               fl6->flowi6_oif = dst->dev->ifindex;
 
        return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 }
@@ -1050,7 +1052,7 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 
        dst = ip6_sk_dst_check(sk, dst, fl6);
 
-       err = ip6_dst_lookup_tail(sk, &dst, fl6);
+       err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
        if (err)
                return ERR_PTR(err);
        if (final_dst)
@@ -1647,7 +1649,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
 
        ip6_flow_hdr(hdr, v6_cork->tclass,
                     ip6_make_flowlabel(net, skb, fl6->flowlabel,
-                                       np->autoflowlabel));
+                                       np->autoflowlabel, fl6));
        hdr->hop_limit = v6_cork->hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index 2e67b660118bf7eeaf2f08033aa759190dea0c3a..b0ab420612bcc30efd5e58a30bdfb5e730e88b0e 100644 (file)
@@ -1095,7 +1095,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
        ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
-                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
index c53331cfed95dcbb6672b80282125516e904a84f..b3054611f88a5f69503e1a44ced1592579dfc4fd 100644 (file)
@@ -1225,18 +1225,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 
        if (rt)
                rt6_set_expires(rt, jiffies + (HZ * lifetime));
-       if (ra_msg->icmph.icmp6_hop_limit) {
-               /* Only set hop_limit on the interface if it is higher than
-                * the current hop_limit.
-                */
-               if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
+       if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
+           ra_msg->icmph.icmp6_hop_limit) {
+               if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
                        in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
+                       if (rt)
+                               dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+                                              ra_msg->icmph.icmp6_hop_limit);
                } else {
-                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
+                       ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
                }
-               if (rt)
-                       dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
-                                      ra_msg->icmph.icmp6_hop_limit);
        }
 
 skip_defrtr:
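
The ndisc hunk above gates the RA hop-limit update on the new accept_ra_min_hop_limit setting. Here is a small sketch of the acceptance rule as it reads in the patch, with plain ints standing in for the conf field and the 8-bit Cur Hop Limit value.

    #include <stdbool.h>
    #include <stdio.h>

    /* Accept an RA-advertised Cur Hop Limit only if it is non-zero and at
     * least the configured minimum.  A minimum of 256 or more disables
     * updates entirely, since the advertised field is only 8 bits wide. */
    static bool accept_ra_hop_limit(int accept_ra_min_hop_limit, int ra_hop_limit)
    {
        if (accept_ra_min_hop_limit >= 256 || ra_hop_limit == 0)
            return false;                                  /* option ignored */
        return accept_ra_min_hop_limit <= ra_hop_limit;    /* false -> warn only */
    }

    int main(void)
    {
        printf("default (min 1), RA says 64:  %d\n", accept_ra_hop_limit(1, 64));
        printf("min 32, RA says 10:           %d\n", accept_ra_hop_limit(32, 10));
        printf("min 256 disables updates:     %d\n", accept_ra_hop_limit(256, 64));
        return 0;
    }
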
index ca4700cb26c4feec258c8e5034389522125b113e..fdbada1569a37348b47b60769f7d679741b21d0a 100644 (file)
@@ -295,7 +295,8 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                 * unspecified and mapped address have a v4 equivalent.
                 */
                v4addr = LOOPBACK4_IPV6;
-               if (!(addr_type & IPV6_ADDR_MULTICAST)) {
+               if (!(addr_type & IPV6_ADDR_MULTICAST) &&
+                   !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
                        err = -EADDRNOTAVAIL;
                        if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
                                           dev, 0)) {
index 6090969937f8b6809f74c3d03f29a0703089eff1..54fccf0d705ddee83e3ba1e1b655fc58cd5c38a4 100644 (file)
@@ -58,6 +58,7 @@
 #include <net/netevent.h>
 #include <net/netlink.h>
 #include <net/nexthop.h>
+#include <net/lwtunnel.h>
 
 #include <asm/uaccess.h>
 
@@ -544,6 +545,7 @@ static void rt6_probe_deferred(struct work_struct *w)
 
 static void rt6_probe(struct rt6_info *rt)
 {
+       struct __rt6_probe_work *work;
        struct neighbour *neigh;
        /*
         * Okay, this does not seem to be appropriate
@@ -558,34 +560,33 @@ static void rt6_probe(struct rt6_info *rt)
        rcu_read_lock_bh();
        neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
        if (neigh) {
-               write_lock(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        goto out;
-       }
-
-       if (!neigh ||
-           time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
-               struct __rt6_probe_work *work;
 
+               work = NULL;
+               write_lock(&neigh->lock);
+               if (!(neigh->nud_state & NUD_VALID) &&
+                   time_after(jiffies,
+                              neigh->updated +
+                              rt->rt6i_idev->cnf.rtr_probe_interval)) {
+                       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+                       if (work)
+                               __neigh_set_probe_once(neigh);
+               }
+               write_unlock(&neigh->lock);
+       } else {
                work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       }
 
-               if (neigh && work)
-                       __neigh_set_probe_once(neigh);
-
-               if (neigh)
-                       write_unlock(&neigh->lock);
+       if (work) {
+               INIT_WORK(&work->work, rt6_probe_deferred);
+               work->target = rt->rt6i_gateway;
+               dev_hold(rt->dst.dev);
+               work->dev = rt->dst.dev;
+               schedule_work(&work->work);
+       }
 
-               if (work) {
-                       INIT_WORK(&work->work, rt6_probe_deferred);
-                       work->target = rt->rt6i_gateway;
-                       dev_hold(rt->dst.dev);
-                       work->dev = rt->dst.dev;
-                       schedule_work(&work->work);
-               }
-       } else {
 out:
-               write_unlock(&neigh->lock);
-       }
        rcu_read_unlock_bh();
 }
 #else
@@ -1770,6 +1771,18 @@ int ip6_route_add(struct fib6_config *cfg)
 
        rt->dst.output = ip6_output;
 
+       if (cfg->fc_encap) {
+               struct lwtunnel_state *lwtstate;
+
+               err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+                                          cfg->fc_encap, &lwtstate);
+               if (err)
+                       goto out;
+               rt->rt6i_lwtstate = lwtstate_get(lwtstate);
+               if (lwtunnel_output_redirect(rt->rt6i_lwtstate))
+                       rt->dst.output = lwtunnel_output6;
+       }
+
        ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
        rt->rt6i_dst.plen = cfg->fc_dst_len;
        if (rt->rt6i_dst.plen == 128)
@@ -2147,6 +2160,7 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
 #endif
        rt->rt6i_prefsrc = ort->rt6i_prefsrc;
        rt->rt6i_table = ort->rt6i_table;
+       rt->rt6i_lwtstate = lwtstate_get(ort->rt6i_lwtstate);
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
@@ -2595,6 +2609,8 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
        [RTA_METRICS]           = { .type = NLA_NESTED },
        [RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
        [RTA_PREF]              = { .type = NLA_U8 },
+       [RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
+       [RTA_ENCAP]             = { .type = NLA_NESTED },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2689,6 +2705,12 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
                cfg->fc_flags |= RTF_PREF(pref);
        }
 
+       if (tb[RTA_ENCAP])
+               cfg->fc_encap = tb[RTA_ENCAP];
+
+       if (tb[RTA_ENCAP_TYPE])
+               cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+
        err = 0;
 errout:
        return err;
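
The hunk above only picks up RTA_ENCAP and RTA_ENCAP_TYPE when the attributes were actually sent. The toy sketch below shows that optional-attribute pattern on a parsed table indexed by attribute type; the attribute numbering and config layout are invented for the illustration, not the netlink definitions.

    #include <stdint.h>
    #include <stdio.h>

    enum { RTA_ENCAP_T = 1, RTA_ENCAP_TYPE_T = 2, RTA_MAX_T = 3 };

    struct toy_cfg {
        const void *fc_encap;       /* opaque nested attribute, if any */
        uint16_t    fc_encap_type;
    };

    /* Absent attributes stay NULL in the table, so the config keeps its
     * defaults unless the caller supplied them. */
    static void pick_up_encap(const void *tb[RTA_MAX_T], struct toy_cfg *cfg)
    {
        if (tb[RTA_ENCAP_T])
            cfg->fc_encap = tb[RTA_ENCAP_T];
        if (tb[RTA_ENCAP_TYPE_T])
            cfg->fc_encap_type = *(const uint16_t *)tb[RTA_ENCAP_TYPE_T];
    }

    int main(void)
    {
        uint16_t encap_type = 7;
        const void *tb[RTA_MAX_T] = { 0 };
        struct toy_cfg cfg = { 0 };

        tb[RTA_ENCAP_TYPE_T] = &encap_type;   /* RTA_ENCAP itself absent */
        pick_up_encap(tb, &cfg);
        printf("encap attr: %s, type: %u\n",
               cfg.fc_encap ? "present" : "absent", cfg.fc_encap_type);
        return 0;
    }
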
@@ -2721,6 +2743,10 @@ beginning:
                                r_cfg.fc_gateway = nla_get_in6_addr(nla);
                                r_cfg.fc_flags |= RTF_GATEWAY;
                        }
+                       r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+                       nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+                       if (nla)
+                               r_cfg.fc_encap_type = nla_get_u16(nla);
                }
                err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
                if (err) {
@@ -2783,7 +2809,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                return ip6_route_add(&cfg);
 }
 
-static inline size_t rt6_nlmsg_size(void)
+static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
 {
        return NLMSG_ALIGN(sizeof(struct rtmsg))
               + nla_total_size(16) /* RTA_SRC */
@@ -2797,7 +2823,8 @@ static inline size_t rt6_nlmsg_size(void)
               + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
-              + nla_total_size(1); /* RTA_PREF */
+              + nla_total_size(1) /* RTA_PREF */
+              + lwtunnel_get_encap_size(rt->rt6i_lwtstate);
 }
 
 static int rt6_fill_node(struct net *net,
@@ -2945,6 +2972,8 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
+       lwtunnel_fill_encap(skb, rt->rt6i_lwtstate);
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -3071,7 +3100,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
        err = -ENOBUFS;
        seq = info->nlh ? info->nlh->nlmsg_seq : 0;
 
-       skb = nlmsg_new(rt6_nlmsg_size(), gfp_any());
+       skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
        if (!skb)
                goto errout;
 
index 4e705add4f187c69b1b11202d47808308e87cf80..45243bbe52536d523a7aff0b636a72b8f2e3daf5 100644 (file)
@@ -17,6 +17,9 @@
 #include <net/inet_frag.h>
 
 static int one = 1;
+static int auto_flowlabels_min;
+static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
+
 
 static struct ctl_table ipv6_table_template[] = {
        {
@@ -45,7 +48,9 @@ static struct ctl_table ipv6_table_template[] = {
                .data           = &init_net.ipv6.sysctl.auto_flowlabels,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &auto_flowlabels_min,
+               .extra2         = &auto_flowlabels_max
        },
        {
                .procname       = "fwmark_reflect",
@@ -75,6 +80,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "ip_nonlocal_bind",
+               .data           = &init_net.ipv6.sysctl.ip_nonlocal_bind,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        { }
 };
 
@@ -117,6 +129,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[5].data = &net->ipv6.sysctl.idgen_retries;
        ipv6_table[6].data = &net->ipv6.sysctl.idgen_delay;
        ipv6_table[7].data = &net->ipv6.sysctl.flowlabel_state_ranges;
+       ipv6_table[8].data = &net->ipv6.sysctl.ip_nonlocal_bind;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
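
The ipv6_table[8].data assignment in this hunk exists because each namespace copies the template table and then re-points .data by position, so the new ip_nonlocal_bind entry needs a matching positional fixup. A toy version of that copy-and-fixup pattern follows; the structures and names are invented.

    #include <stdio.h>
    #include <string.h>

    struct ctl_entry { const char *name; int *data; };
    struct ns_cfg    { int bindv6only; int ip_nonlocal_bind; };

    /* Template with NULL data pointers; the empty entry mirrors the sentinel. */
    static const struct ctl_entry template_tbl[] = {
        { "bindv6only",       NULL },
        { "ip_nonlocal_bind", NULL },
        { NULL,               NULL },
    };

    int main(void)
    {
        struct ns_cfg cfg = { .bindv6only = 0, .ip_nonlocal_bind = 1 };
        struct ctl_entry tbl[sizeof(template_tbl) / sizeof(template_tbl[0])];

        memcpy(tbl, template_tbl, sizeof(tbl));
        tbl[0].data = &cfg.bindv6only;         /* positional fixups: adding a  */
        tbl[1].data = &cfg.ip_nonlocal_bind;   /* template entry means adding  */
                                               /* the matching index here too  */
        printf("%s = %d\n", tbl[1].name, *tbl[1].data);
        return 0;
    }
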
index 6748c4277affad71cd721e3a985af10c31c047ad..52dd0d9974d6c8dbaa4961434211eda2f55b6482 100644 (file)
@@ -276,7 +276,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        if (err)
                goto late_failure;
 
-       ip6_set_txhash(sk);
+       sk_set_txhash(sk);
 
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
@@ -1090,7 +1090,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
-       ip6_set_txhash(newsk);
+       sk_set_txhash(newsk);
 
        /* Now IPv6 options...
 
@@ -1481,8 +1481,7 @@ do_time_wait:
                                            ntohs(th->dest), tcp_v6_iif(skb));
                if (sk2) {
                        struct inet_timewait_sock *tw = inet_twsk(sk);
-                       inet_twsk_deschedule(tw);
-                       inet_twsk_put(tw);
+                       inet_twsk_deschedule_put(tw);
                        sk = sk2;
                        tcp_v6_restore_cb(skb);
                        goto process;
index 317c4662e544679ab37dcc8cfa92fc8108c4820b..f7ba51e8b4cafbf720c5ee3096c1102cbf2a4438 100644 (file)
@@ -44,6 +44,49 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
        ieee802154_if_remove(sdata);
 }
 
+#ifdef CONFIG_PM
+static int ieee802154_suspend(struct wpan_phy *wpan_phy)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+
+       if (!local->open_count)
+               goto suspend;
+
+       ieee802154_stop_queue(&local->hw);
+       synchronize_net();
+
+       /* stop hardware - this must stop RX */
+       ieee802154_stop_device(local);
+
+suspend:
+       local->suspended = true;
+       return 0;
+}
+
+static int ieee802154_resume(struct wpan_phy *wpan_phy)
+{
+       struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
+       int ret;
+
+       /* nothing to do if HW shouldn't run */
+       if (!local->open_count)
+               goto wake_up;
+
+       /* restart hardware */
+       ret = drv_start(local);
+       if (ret)
+               return ret;
+
+wake_up:
+       ieee802154_wake_queue(&local->hw);
+       local->suspended = false;
+       return 0;
+}
+#else
+#define ieee802154_suspend NULL
+#define ieee802154_resume NULL
+#endif
+
 static int
 ieee802154_add_iface(struct wpan_phy *phy, const char *name,
                     unsigned char name_assign_type,
@@ -145,13 +188,18 @@ static int
 ieee802154_set_pan_id(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
                      __le16 pan_id)
 {
+       int ret;
+
        ASSERT_RTNL();
 
        if (wpan_dev->pan_id == pan_id)
                return 0;
 
-       wpan_dev->pan_id = pan_id;
-       return 0;
+       ret = mac802154_wpan_update_llsec(wpan_dev->netdev);
+       if (!ret)
+               wpan_dev->pan_id = pan_id;
+
+       return ret;
 }
 
 static int
@@ -227,6 +275,8 @@ ieee802154_set_lbt_mode(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
 const struct cfg802154_ops mac802154_config_ops = {
        .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated,
        .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated,
+       .suspend = ieee802154_suspend,
+       .resume = ieee802154_resume,
        .add_virtual_intf = ieee802154_add_iface,
        .del_virtual_intf = ieee802154_del_iface,
        .set_channel = ieee802154_set_channel,
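
The mac802154 hunks above add suspend/resume under CONFIG_PM, falling back to NULL callbacks via #define when power management is compiled out, and then wire them into cfg802154_ops unconditionally. Below is a standalone sketch of that #ifdef pattern; CONFIG_PM and every name here are just stand-ins for the illustration.

    #include <stdio.h>

    struct dev_ops {
        int (*suspend)(void *dev);
        int (*resume)(void *dev);
    };

    #ifdef CONFIG_PM
    static int demo_suspend(void *dev) { (void)dev; return 0; }
    static int demo_resume(void *dev)  { (void)dev; return 0; }
    #else
    /* Without PM support the ops fields still exist; they just become NULL,
     * so the ops initializer below needs no #ifdef of its own. */
    #define demo_suspend NULL
    #define demo_resume  NULL
    #endif

    static const struct dev_ops ops = {
        .suspend = demo_suspend,
        .resume  = demo_resume,
    };

    int main(void)
    {
        printf("PM callbacks present: %d\n", ops.suspend != NULL);
        return 0;
    }
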
index 34755d5751a4681c65d09cb7487e5b98350cedfe..56ccffa3f2bfc7731adfaabb1026ef7e8af68d32 100644 (file)
@@ -56,9 +56,13 @@ struct ieee802154_local {
        struct hrtimer ifs_timer;
 
        bool started;
+       bool suspended;
 
        struct tasklet_struct tasklet;
        struct sk_buff_head skb_queue;
+
+       struct sk_buff *tx_skb;
+       struct work_struct tx_work;
 };
 
 enum {
@@ -94,8 +98,6 @@ struct ieee802154_sub_if_data {
        struct mac802154_llsec sec;
 };
 
-#define MAC802154_CHAN_NONE            0xff /* No channel is assigned */
-
 /* utility functions/constants */
 extern const void *const mac802154_wpan_phy_privid; /*  for wpan_phy privid */
 
@@ -125,6 +127,8 @@ ieee802154_sdata_running(struct ieee802154_sub_if_data *sdata)
 
 extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
 
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb);
+void ieee802154_xmit_worker(struct work_struct *work);
 netdev_tx_t
 ieee802154_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
 netdev_tx_t
@@ -167,6 +171,8 @@ void mac802154_get_table(struct net_device *dev,
                         struct ieee802154_llsec_table **t);
 void mac802154_unlock_table(struct net_device *dev);
 
+int mac802154_wpan_update_llsec(struct net_device *dev);
+
 /* interface handling */
 int ieee802154_iface_init(void);
 void ieee802154_iface_exit(void);
@@ -176,5 +182,6 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
                  unsigned char name_assign_type, enum nl802154_iftype type,
                  __le64 extended_addr);
 void ieee802154_remove_interfaces(struct ieee802154_local *local);
+void ieee802154_stop_device(struct ieee802154_local *local);
 
 #endif /* __IEEE802154_I_H */
index 8b698246a51b6d304c209442ca0ecd0c3e652c79..416de903e46757cfead3fe54106efa07ce6e6245 100644 (file)
@@ -30,7 +30,7 @@
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-static int mac802154_wpan_update_llsec(struct net_device *dev)
+int mac802154_wpan_update_llsec(struct net_device *dev)
 {
        struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev);
        struct ieee802154_mlme_ops *ops = ieee802154_mlme_ops(dev);
@@ -314,11 +314,8 @@ static int mac802154_slave_close(struct net_device *dev)
 
        clear_bit(SDATA_STATE_RUNNING, &sdata->state);
 
-       if (!local->open_count) {
-               flush_workqueue(local->workqueue);
-               hrtimer_cancel(&local->ifs_timer);
-               drv_stop(local);
-       }
+       if (!local->open_count)
+               ieee802154_stop_device(local);
 
        return 0;
 }
@@ -471,6 +468,7 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                       enum nl802154_iftype type)
 {
        struct wpan_dev *wpan_dev = &sdata->wpan_dev;
+       int ret;
        u8 tmp;
 
        /* set some type-dependent values */
@@ -505,6 +503,10 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
                mutex_init(&sdata->sec_mtx);
 
                mac802154_llsec_init(&sdata->sec);
+               ret = mac802154_wpan_update_llsec(sdata->dev);
+               if (ret < 0)
+                       return ret;
+
                break;
        case NL802154_IFTYPE_MONITOR:
                sdata->dev->destructor = free_netdev;
index 356b346e1ee86fdeadebf7be5d318c70dbc0d969..9e55431b9a5cc0baf0c40fa9e7a96c3381617ad5 100644 (file)
@@ -40,7 +40,7 @@ static void ieee802154_tasklet_handler(unsigned long data)
                         * netstack.
                         */
                        skb->pkt_type = 0;
-                       ieee802154_rx(&local->hw, skb);
+                       ieee802154_rx(local, skb);
                        break;
                default:
                        WARN(1, "mac802154: Packet is of unknown type %d\n",
@@ -58,11 +58,9 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
        struct ieee802154_local *local;
        size_t priv_size;
 
-       if (!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
-           !ops->start || !ops->stop || !ops->set_channel) {
-               pr_err("undefined IEEE802.15.4 device operations\n");
+       if (WARN_ON(!ops || !(ops->xmit_async || ops->xmit_sync) || !ops->ed ||
+                   !ops->start || !ops->stop || !ops->set_channel))
                return NULL;
-       }
 
        /* Ensure 32-byte alignment of our private data and hw private data.
         * We use the wpan_phy priv data for both our ieee802154_local and for
@@ -107,6 +105,8 @@ ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops)
 
        skb_queue_head_init(&local->skb_queue);
 
+       INIT_WORK(&local->tx_work, ieee802154_xmit_worker);
+
        /* init supported flags with 802.15.4 default ranges */
        phy->supported.max_minbe = 8;
        phy->supported.min_maxbe = 3;
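
The WARN_ON() conversion in ieee802154_alloc_hw() relies on the macro evaluating to its condition, so the ops check can warn and bail out in a single expression. A hedged userspace approximation, with a hand-rolled WARN_ON standing in for the kernel macro (GCC/clang statement expression):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's WARN_ON(): print a warning and
	 * evaluate to the normalized condition, so it can be used inside if (). */
	#define WARN_ON(cond) ({					\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
		__ret;							\
	})

	static const void *alloc_hw(const void *ops)
	{
		/* warn and bail out in one step, as in ieee802154_alloc_hw() */
		if (WARN_ON(!ops))
			return NULL;
		return ops;
	}

	int main(void)
	{
		return alloc_hw(NULL) ? 0 : 1;
	}
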
index d93ad2d4a4fc2a8cf103d0a87337e2eb115fddd0..d1c33c1d6b9b3dcd74dc077b5de5b5556a18901e 100644 (file)
@@ -246,13 +246,15 @@ ieee802154_monitors_rx(struct ieee802154_local *local, struct sk_buff *skb)
        }
 }
 
-void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
+void ieee802154_rx(struct ieee802154_local *local, struct sk_buff *skb)
 {
-       struct ieee802154_local *local = hw_to_local(hw);
        u16 crc;
 
        WARN_ON_ONCE(softirq_count() == 0);
 
+       if (local->suspended)
+               goto drop;
+
        /* TODO: When a transceiver omits the checksum here, we
         * add an own calculated one. This is currently an ugly
         * solution because the monitor needs a crc here.
@@ -273,8 +275,7 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
                crc = crc_ccitt(0, skb->data, skb->len);
                if (crc) {
                        rcu_read_unlock();
-                       kfree_skb(skb);
-                       return;
+                       goto drop;
                }
        }
        /* remove crc */
@@ -283,8 +284,11 @@ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb)
        __ieee802154_rx_handle_packet(local, skb);
 
        rcu_read_unlock();
+
+       return;
+drop:
+       kfree_skb(skb);
 }
-EXPORT_SYMBOL(ieee802154_rx);
 
 void
 ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi)
index c62e95695c7843947c8643cb268f95f2e64c3da9..7ed439172f30809d59fb5673131957ca8c25c56c 100644 (file)
 #include "ieee802154_i.h"
 #include "driver-ops.h"
 
-/* IEEE 802.15.4 transceivers can sleep during the xmit session, so process
- * packets through the workqueue.
- */
-struct ieee802154_xmit_cb {
-       struct sk_buff *skb;
-       struct work_struct work;
-       struct ieee802154_local *local;
-};
-
-static struct ieee802154_xmit_cb ieee802154_xmit_cb;
-
-static void ieee802154_xmit_worker(struct work_struct *work)
+void ieee802154_xmit_worker(struct work_struct *work)
 {
-       struct ieee802154_xmit_cb *cb =
-               container_of(work, struct ieee802154_xmit_cb, work);
-       struct ieee802154_local *local = cb->local;
-       struct sk_buff *skb = cb->skb;
+       struct ieee802154_local *local =
+               container_of(work, struct ieee802154_local, tx_work);
+       struct sk_buff *skb = local->tx_skb;
        struct net_device *dev = skb->dev;
        int res;
 
@@ -106,11 +94,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb->len;
        } else {
-               INIT_WORK(&ieee802154_xmit_cb.work, ieee802154_xmit_worker);
-               ieee802154_xmit_cb.skb = skb;
-               ieee802154_xmit_cb.local = local;
-
-               queue_work(local->workqueue, &ieee802154_xmit_cb.work);
+               local->tx_skb = skb;
+               queue_work(local->workqueue, &local->tx_work);
        }
 
        return NETDEV_TX_OK;
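
The xmit path now embeds the work item and the pending skb in ieee802154_local and recovers the owning structure with container_of() in the worker, instead of funnelling everything through one static callback struct. A small standalone C sketch of that embedded-work pattern, with plain structs standing in for the kernel types:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work { int pending; };	/* stand-in for struct work_struct */

	/* State embedded alongside its work item, as ieee802154_local now
	 * embeds tx_work and tx_skb. */
	struct local {
		const char *tx_skb;
		struct work tx_work;
	};

	static void xmit_worker(struct work *w)
	{
		struct local *local = container_of(w, struct local, tx_work);

		printf("transmitting %s\n", local->tx_skb);
	}

	int main(void)
	{
		struct local local = { .tx_skb = "frame", .tx_work = { 0 } };

		xmit_worker(&local.tx_work);	/* the workqueue would call this */
		return 0;
	}
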
index 583435f3893037e45d4a5879b66b4cda3bb6f27f..f9fd0957ab67f256d10e80563bb68ce88334f911 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include "ieee802154_i.h"
+#include "driver-ops.h"
 
 /* privid for wpan_phys to determine whether they belong to us or not */
 const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;
@@ -92,3 +93,10 @@ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
        dev_consume_skb_any(skb);
 }
 EXPORT_SYMBOL(ieee802154_xmit_complete);
+
+void ieee802154_stop_device(struct ieee802154_local *local)
+{
+       flush_workqueue(local->workqueue);
+       hrtimer_cancel(&local->ifs_timer);
+       drv_stop(local);
+}
index 17bde799c8548e46a791fabc8b9619c57f2b2f99..5c467ef973114c49b352ac64ed248e4199d3d0f3 100644 (file)
@@ -24,7 +24,13 @@ config NET_MPLS_GSO
 
 config MPLS_ROUTING
        tristate "MPLS: routing support"
-       help
+       ---help---
         Add support for forwarding of mpls packets.
 
+config MPLS_IPTUNNEL
+       tristate "MPLS: IP over MPLS tunnel support"
+       depends on LWTUNNEL && MPLS_ROUTING
+       ---help---
+        Add support for tunneling IP packets over MPLS.
+
 endif # MPLS
index 65bbe68c72e66ad44463ed465eaafe01e4c2bd10..9ca92362501653463429b0254d999782f2b6c450 100644 (file)
@@ -3,5 +3,6 @@
 #
 obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
 obj-$(CONFIG_MPLS_ROUTING) += mpls_router.o
+obj-$(CONFIG_MPLS_IPTUNNEL) += mpls_iptunnel.o
 
 mpls_router-y := af_mpls.o
index 1f93a5978f2ad43fc81a16427e34d07ca2c0f34e..88cfaa241c07b72650344da9097363f7fd85d5f2 100644 (file)
 #include <net/ip_fib.h>
 #include <net/netevent.h>
 #include <net/netns/generic.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#endif
 #include "internal.h"
 
 #define LABEL_NOT_SPECIFIED (1<<20)
@@ -58,10 +62,11 @@ static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
        return rcu_dereference_rtnl(dev->mpls_ptr);
 }
 
-static bool mpls_output_possible(const struct net_device *dev)
+bool mpls_output_possible(const struct net_device *dev)
 {
        return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
 }
+EXPORT_SYMBOL_GPL(mpls_output_possible);
 
 static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
 {
@@ -69,13 +74,14 @@ static unsigned int mpls_rt_header_size(const struct mpls_route *rt)
        return rt->rt_labels * sizeof(struct mpls_shim_hdr);
 }
 
-static unsigned int mpls_dev_mtu(const struct net_device *dev)
+unsigned int mpls_dev_mtu(const struct net_device *dev)
 {
        /* The amount of data the layer 2 frame can hold */
        return dev->mtu;
 }
+EXPORT_SYMBOL_GPL(mpls_dev_mtu);
 
-static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 {
        if (skb->len <= mtu)
                return false;
@@ -85,6 +91,7 @@ static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
 static bool mpls_egress(struct mpls_route *rt, struct sk_buff *skb,
                        struct mpls_entry_decoded dec)
@@ -327,6 +334,90 @@ static unsigned find_free_label(struct net *net)
        return LABEL_NOT_SPECIFIED;
 }
 
+#if IS_ENABLED(CONFIG_INET)
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+       struct net_device *dev = NULL;
+       struct rtable *rt;
+       struct in_addr daddr;
+
+       memcpy(&daddr, addr, sizeof(struct in_addr));
+       rt = ip_route_output(net, daddr.s_addr, 0, 0, 0);
+       if (IS_ERR(rt))
+               goto errout;
+
+       dev = rt->dst.dev;
+       dev_hold(dev);
+
+       ip_rt_put(rt);
+
+       return dev;
+errout:
+       return ERR_PTR(-ENODEV);
+}
+#else
+static struct net_device *inet_fib_lookup_dev(struct net *net, void *addr)
+{
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+       struct net_device *dev = NULL;
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+       int err;
+
+       if (!ipv6_stub)
+               return ERR_PTR(-EAFNOSUPPORT);
+
+       memset(&fl6, 0, sizeof(fl6));
+       memcpy(&fl6.daddr, addr, sizeof(struct in6_addr));
+       err = ipv6_stub->ipv6_dst_lookup(net, NULL, &dst, &fl6);
+       if (err)
+               goto errout;
+
+       dev = dst->dev;
+       dev_hold(dev);
+       dst_release(dst);
+
+       return dev;
+
+errout:
+       return ERR_PTR(err);
+}
+#else
+static struct net_device *inet6_fib_lookup_dev(struct net *net, void *addr)
+{
+       return ERR_PTR(-EAFNOSUPPORT);
+}
+#endif
+
+static struct net_device *find_outdev(struct net *net,
+                                     struct mpls_route_config *cfg)
+{
+       struct net_device *dev = NULL;
+
+       if (!cfg->rc_ifindex) {
+               switch (cfg->rc_via_table) {
+               case NEIGH_ARP_TABLE:
+                       dev = inet_fib_lookup_dev(net, cfg->rc_via);
+                       break;
+               case NEIGH_ND_TABLE:
+                       dev = inet6_fib_lookup_dev(net, cfg->rc_via);
+                       break;
+               case NEIGH_LINK_TABLE:
+                       break;
+               }
+       } else {
+               dev = dev_get_by_index(net, cfg->rc_ifindex);
+       }
+
+       return dev;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
        struct mpls_route __rcu **platform_label;
@@ -357,10 +448,12 @@ static int mpls_route_add(struct mpls_route_config *cfg)
        if (cfg->rc_output_labels > MAX_NEW_LABELS)
                goto errout;
 
-       err = -ENODEV;
-       dev = dev_get_by_index(net, cfg->rc_ifindex);
-       if (!dev)
+       dev = find_outdev(net, cfg);
+       if (IS_ERR(dev)) {
+               err = PTR_ERR(dev);
+               dev = NULL;
                goto errout;
+       }
 
        /* Ensure this is a supported device */
        err = -EINVAL;
@@ -626,6 +719,7 @@ int nla_put_labels(struct sk_buff *skb, int attrtype,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(nla_put_labels);
 
 int nla_get_labels(const struct nlattr *nla,
                   u32 max_labels, u32 *labels, u32 label[])
@@ -671,6 +765,7 @@ int nla_get_labels(const struct nlattr *nla,
        *labels = nla_labels;
        return 0;
 }
+EXPORT_SYMBOL_GPL(nla_get_labels);
 
 static int rtm_to_route_config(struct sk_buff *skb,  struct nlmsghdr *nlh,
                               struct mpls_route_config *cfg)
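
find_outdev() and the per-family lookup helpers above return either a device pointer or an encoded errno via ERR_PTR(), which mpls_route_add() unpacks with IS_ERR()/PTR_ERR(). A simplified, runnable sketch of that error-pointer convention, using hand-rolled helpers rather than the kernel's:

	#include <errno.h>
	#include <stdio.h>

	/* Simplified error-pointer helpers: small negative errno values are
	 * encoded at the top of the pointer range, as in the kernel. */
	#define MAX_ERRNO	4095
	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *lookup_dev(int have_route)
	{
		static int dummy_dev;	/* stand-in for a struct net_device */

		return have_route ? (void *)&dummy_dev : ERR_PTR(-ENODEV);
	}

	int main(void)
	{
		void *dev = lookup_dev(0);

		if (IS_ERR(dev)) {	/* mirrors the mpls_route_add() error path */
			printf("lookup failed: %ld\n", PTR_ERR(dev));
			return 1;
		}
		return 0;
	}
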
index 8cabeb5a1cb928c856c037c5994116df8547fb71..2681a4ba6c375f3faf83498150350ddea7392ccc 100644 (file)
@@ -50,7 +50,12 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
        return result;
 }
 
-int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels, const u32 label[]);
-int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels, u32 label[]);
+int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
+                  const u32 label[]);
+int nla_get_labels(const struct nlattr *nla, u32 max_labels, u32 *labels,
+                  u32 label[]);
+bool mpls_output_possible(const struct net_device *dev);
+unsigned int mpls_dev_mtu(const struct net_device *dev);
+bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
 
 #endif /* MPLS_INTERNAL_H */
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
new file mode 100644 (file)
index 0000000..276f8c9
--- /dev/null
@@ -0,0 +1,233 @@
+/*
+ * mpls tunnels        An implementation of MPLS tunnels using the lightweight
+ *             tunnel infrastructure
+ *
+ * Authors:    Roopa Prabhu, <roopa@cumulusnetworks.com>
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ */
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/net.h>
+#include <linux/module.h>
+#include <linux/mpls.h>
+#include <linux/vmalloc.h>
+#include <net/ip.h>
+#include <net/dst.h>
+#include <net/lwtunnel.h>
+#include <net/netevent.h>
+#include <net/netns/generic.h>
+#include <net/ip6_fib.h>
+#include <net/route.h>
+#include <net/mpls_iptunnel.h>
+#include <linux/mpls_iptunnel.h>
+#include "internal.h"
+
+static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+       [MPLS_IPTUNNEL_DST]     = { .type = NLA_U32 },
+};
+
+static unsigned int mpls_encap_size(struct mpls_iptunnel_encap *en)
+{
+       /* The size of the layer 2.5 labels to be added for this route */
+       return en->labels * sizeof(struct mpls_shim_hdr);
+}
+
+int mpls_output(struct sock *sk, struct sk_buff *skb)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+       struct mpls_shim_hdr *hdr;
+       struct net_device *out_dev;
+       unsigned int hh_len;
+       unsigned int new_header_size;
+       unsigned int mtu;
+       struct dst_entry *dst = skb_dst(skb);
+       struct rtable *rt = NULL;
+       struct rt6_info *rt6 = NULL;
+       struct lwtunnel_state *lwtstate = NULL;
+       int err = 0;
+       bool bos;
+       int i;
+       unsigned int ttl;
+
+       /* Obtain the ttl */
+       if (skb->protocol == htons(ETH_P_IP)) {
+               ttl = ip_hdr(skb)->ttl;
+               rt = (struct rtable *)dst;
+               lwtstate = rt->rt_lwtstate;
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               ttl = ipv6_hdr(skb)->hop_limit;
+               rt6 = (struct rt6_info *)dst;
+               lwtstate = rt6->rt6i_lwtstate;
+       } else {
+               goto drop;
+       }
+
+       skb_orphan(skb);
+
+       /* Find the output device */
+       out_dev = dst->dev;
+       if (!mpls_output_possible(out_dev) ||
+           !lwtstate || skb_warn_if_lro(skb))
+               goto drop;
+
+       skb_forward_csum(skb);
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       /* Verify the destination can hold the packet */
+       new_header_size = mpls_encap_size(tun_encap_info);
+       mtu = mpls_dev_mtu(out_dev);
+       if (mpls_pkt_too_big(skb, mtu - new_header_size))
+               goto drop;
+
+       hh_len = LL_RESERVED_SPACE(out_dev);
+       if (!out_dev->header_ops)
+               hh_len = 0;
+
+       /* Ensure there is enough space for the headers in the skb */
+       if (skb_cow(skb, hh_len + new_header_size))
+               goto drop;
+
+       skb_push(skb, new_header_size);
+       skb_reset_network_header(skb);
+
+       skb->dev = out_dev;
+       skb->protocol = htons(ETH_P_MPLS_UC);
+
+       /* Push the new labels */
+       hdr = mpls_hdr(skb);
+       bos = true;
+       for (i = tun_encap_info->labels - 1; i >= 0; i--) {
+               hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
+                                          ttl, 0, bos);
+               bos = false;
+       }
+
+       if (rt)
+               err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
+                                skb);
+       else if (rt6)
+               err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+                                skb);
+       if (err)
+               net_dbg_ratelimited("%s: packet transmission failed: %d\n",
+                                   __func__, err);
+
+       return 0;
+
+drop:
+       kfree_skb(skb);
+       return -EINVAL;
+}
+
+static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+                           struct lwtunnel_state **ts)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+       struct nlattr *tb[MPLS_IPTUNNEL_MAX + 1];
+       struct lwtunnel_state *newts;
+       int tun_encap_info_len;
+       int ret;
+
+       ret = nla_parse_nested(tb, MPLS_IPTUNNEL_MAX, nla,
+                              mpls_iptunnel_policy);
+       if (ret < 0)
+               return ret;
+
+       if (!tb[MPLS_IPTUNNEL_DST])
+               return -EINVAL;
+
+       tun_encap_info_len = sizeof(*tun_encap_info);
+
+       newts = lwtunnel_state_alloc(tun_encap_info_len);
+       if (!newts)
+               return -ENOMEM;
+
+       newts->len = tun_encap_info_len;
+       tun_encap_info = mpls_lwtunnel_encap(newts);
+       ret = nla_get_labels(tb[MPLS_IPTUNNEL_DST], MAX_NEW_LABELS,
+                            &tun_encap_info->labels, tun_encap_info->label);
+       if (ret)
+               goto errout;
+       newts->type = LWTUNNEL_ENCAP_MPLS;
+       newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
+
+       *ts = newts;
+
+       return 0;
+
+errout:
+       kfree(newts);
+       *ts = NULL;
+
+       return ret;
+}
+
+static int mpls_fill_encap_info(struct sk_buff *skb,
+                               struct lwtunnel_state *lwtstate)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       if (nla_put_labels(skb, MPLS_IPTUNNEL_DST, tun_encap_info->labels,
+                          tun_encap_info->label))
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int mpls_encap_nlsize(struct lwtunnel_state *lwtstate)
+{
+       struct mpls_iptunnel_encap *tun_encap_info;
+
+       tun_encap_info = mpls_lwtunnel_encap(lwtstate);
+
+       return nla_total_size(tun_encap_info->labels * 4);
+}
+
+static int mpls_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
+{
+       struct mpls_iptunnel_encap *a_hdr = mpls_lwtunnel_encap(a);
+       struct mpls_iptunnel_encap *b_hdr = mpls_lwtunnel_encap(b);
+       int l;
+
+       if (a_hdr->labels != b_hdr->labels)
+               return 1;
+
+       for (l = 0; l < MAX_NEW_LABELS; l++)
+               if (a_hdr->label[l] != b_hdr->label[l])
+                       return 1;
+       return 0;
+}
+
+static const struct lwtunnel_encap_ops mpls_iptun_ops = {
+       .build_state = mpls_build_state,
+       .output = mpls_output,
+       .fill_encap = mpls_fill_encap_info,
+       .get_encap_size = mpls_encap_nlsize,
+       .cmp_encap = mpls_encap_cmp,
+};
+
+static int __init mpls_iptunnel_init(void)
+{
+       return lwtunnel_encap_add_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_init(mpls_iptunnel_init);
+
+static void __exit mpls_iptunnel_exit(void)
+{
+       lwtunnel_encap_del_ops(&mpls_iptun_ops, LWTUNNEL_ENCAP_MPLS);
+}
+module_exit(mpls_iptunnel_exit);
+
+MODULE_DESCRIPTION("MultiProtocol Label Switching IP Tunnels");
+MODULE_LICENSE("GPL v2");
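
The label-push loop in mpls_output() writes one 4-byte RFC 3032 shim per label and sets the bottom-of-stack bit only on the innermost (last) entry. A standalone sketch of that encoding, using a plain uint32_t in place of struct mpls_shim_hdr:

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>	/* htonl()/ntohl() */

	/* RFC 3032 label stack entry: label(20) | TC(3) | S(1) | TTL(8),
	 * stored as one network-byte-order 32-bit word per pushed label. */
	static uint32_t mpls_entry_encode(uint32_t label, unsigned int ttl,
					  unsigned int tc, int bos)
	{
		return htonl((label << 12) | (tc << 9) |
			     ((bos ? 1 : 0) << 8) | (ttl & 0xff));
	}

	int main(void)
	{
		uint32_t labels[] = { 100, 200 };	/* outermost label first */
		unsigned int n = 2, i;

		/* The innermost (last) entry carries the bottom-of-stack bit,
		 * matching the downward loop in mpls_output() above. */
		for (i = 0; i < n; i++)
			printf("hdr[%u] = 0x%08x\n", i,
			       ntohl(mpls_entry_encode(labels[i], 64, 0,
						       i == n - 1)));
		return 0;
	}
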
index 52561e1c31e26933dd654f095663c0f0a633f007..cb2f13ebb5a66cdf5fd9498ddfe8949f38741803 100644 (file)
@@ -166,11 +166,13 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                        goto err;
                *dest = out->group;
                break;
+#ifdef CONFIG_CGROUP_NET_CLASSID
        case NFT_META_CGROUP:
                if (skb->sk == NULL || !sk_fullsock(skb->sk))
                        goto err;
                *dest = skb->sk->sk_classid;
                break;
+#endif
        default:
                WARN_ON(1);
                goto err;
@@ -246,7 +248,9 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
        case NFT_META_CPU:
        case NFT_META_IIFGROUP:
        case NFT_META_OIFGROUP:
+#ifdef CONFIG_CGROUP_NET_CLASSID
        case NFT_META_CGROUP:
+#endif
                len = sizeof(u32);
                break;
        case NFT_META_IIFNAME:
index cca96cec1b689fcd104e273a64db6eda44171beb..d0c96c5ae29aa84057e93a5d9796014dca4c52e8 100644 (file)
@@ -272,8 +272,7 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NFT_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
@@ -437,8 +436,7 @@ tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                                            tgi->lport ? tgi->lport : hp->dest,
                                            skb->dev, NFT_LOOKUP_LISTENER);
                if (sk2) {
-                       inet_twsk_deschedule(inet_twsk(sk));
-                       inet_twsk_put(inet_twsk(sk));
+                       inet_twsk_deschedule_put(inet_twsk(sk));
                        sk = sk2;
                }
        }
index 91b9478413ef1ee97602666142c65e9a2fad5228..6e1701de04d804ee86d31a9802e5fa5133e5ac15 100644 (file)
@@ -15,6 +15,6 @@ openvswitch-y := \
        vport-internal_dev.o \
        vport-netdev.o
 
+obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
-obj-$(CONFIG_OPENVSWITCH_VXLAN)        += vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GRE)  += vport-gre.o
index 8a8c0b8b4f63a4bd8e5ff776250189558e6fcb1e..cf04c2f8b32a57bfec8e024d2db95dd8c0468b3f 100644 (file)
@@ -611,7 +611,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
 {
-       struct ovs_tunnel_info info;
+       struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;
@@ -733,7 +733,15 @@ static int execute_set_action(struct sk_buff *skb,
 {
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
-               OVS_CB(skb)->egress_tun_info = nla_data(a);
+               struct ovs_tunnel_info *tun = nla_data(a);
+
+               skb_dst_drop(skb);
+               dst_hold((struct dst_entry *)tun->tun_dst);
+               skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
+
+               /* FIXME: Remove when all vports have been converted */
+               OVS_CB(skb)->egress_tun_info = &tun->tun_dst->u.tun_info;
+
                return 0;
        }
 
index ff8c4a4c160986bf206f4751860f9767e71246bf..ffe984f5b95ce36f15e02de152e453233290fbf4 100644 (file)
@@ -176,7 +176,7 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
 const char *ovs_dp_name(const struct datapath *dp)
 {
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
-       return vport->ops->get_name(vport);
+       return ovs_vport_name(vport);
 }
 
 static int get_dpifindex(const struct datapath *dp)
@@ -188,7 +188,7 @@ static int get_dpifindex(const struct datapath *dp)
 
        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
-               ifindex = netdev_vport_priv(local)->dev->ifindex;
+               ifindex = local->dev->ifindex;
        else
                ifindex = 0;
 
@@ -1018,7 +1018,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                ovs_unlock();
 
-               ovs_nla_free_flow_actions(old_acts);
+               ovs_nla_free_flow_actions_rcu(old_acts);
                ovs_flow_free(new_flow, false);
        }
 
@@ -1030,7 +1030,7 @@ err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
 err_kfree_acts:
-       kfree(acts);
+       ovs_nla_free_flow_actions(acts);
 err_kfree_flow:
        ovs_flow_free(new_flow, false);
 error:
@@ -1157,7 +1157,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
        if (old_acts)
-               ovs_nla_free_flow_actions(old_acts);
+               ovs_nla_free_flow_actions_rcu(old_acts);
 
        return 0;
 
@@ -1165,7 +1165,7 @@ err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
 err_kfree_acts:
-       kfree(acts);
+       ovs_nla_free_flow_actions(acts);
 error:
        return error;
 }
@@ -1800,7 +1800,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME,
-                          vport->ops->get_name(vport)))
+                          ovs_vport_name(vport)))
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
@@ -2219,13 +2219,10 @@ static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
                        struct vport *vport;
 
                        hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
-                               struct netdev_vport *netdev_vport;
-
                                if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
                                        continue;
 
-                               netdev_vport = netdev_vport_priv(vport);
-                               if (dev_net(netdev_vport->dev) == dnet)
+                               if (dev_net(vport->dev) == dnet)
                                        list_add(&vport->detach_list, head);
                        }
                }
index cd691e935e08c76b75e8ed90d7ec79ce9875ee3c..6b28c5cedb23826fe151958552234332ef7ca201 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
+#include <net/ip_tunnels.h>
 
 #include "flow.h"
 #include "flow_table.h"
@@ -98,7 +99,7 @@ struct datapath {
  * when a packet is received by OVS.
  */
 struct ovs_skb_cb {
-       struct ovs_tunnel_info  *egress_tun_info;
+       struct ip_tunnel_info  *egress_tun_info;
        struct vport            *input_vport;
 };
 #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -114,7 +115,7 @@ struct ovs_skb_cb {
  * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY.
  */
 struct dp_upcall_info {
-       const struct ovs_tunnel_info *egress_tun_info;
+       const struct ip_tunnel_info *egress_tun_info;
        const struct nlattr *userdata;
        const struct nlattr *actions;
        int actions_len;
index 2c631fe76be191c1a7dd1bc33c1901c8507621b4..a7a80a6b77b0ab15ecdd5859ee732417ee4a4705 100644 (file)
@@ -58,13 +58,10 @@ void ovs_dp_notify_wq(struct work_struct *work)
                        struct hlist_node *n;
 
                        hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) {
-                               struct netdev_vport *netdev_vport;
-
                                if (vport->ops->type != OVS_VPORT_TYPE_NETDEV)
                                        continue;
 
-                               netdev_vport = netdev_vport_priv(vport);
-                               if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
+                               if (!(vport->dev->priv_flags & IFF_OVS_DATAPATH))
                                        dp_detach_port_notify(vport);
                        }
                }
index bc7b0aba994adf6f8ec8cfc2b3278b46d19621db..8db22ef73626c8cb9b428d77f3ec24eea8b61d97 100644 (file)
@@ -682,12 +682,12 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
        return key_extract(skb, key);
 }
 
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb, struct sw_flow_key *key)
 {
        /* Extract metadata from packet. */
        if (tun_info) {
-               memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+               memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));
 
                if (tun_info->options) {
                        BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
index a076e445ccc2e267f2664ddd6e81badc98a2636b..b62cdb3e35892aa56468fbc17b1cd0ef3450af6a 100644 (file)
 #include <linux/time.h>
 #include <linux/flex_array.h>
 #include <net/inet_ecn.h>
+#include <net/ip_tunnels.h>
+#include <net/dst_metadata.h>
 
 struct sk_buff;
 
-/* Used to memset ovs_key_ipv4_tunnel padding. */
-#define OVS_TUNNEL_KEY_SIZE                                    \
-       (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) +         \
-        FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst))
-
-struct ovs_key_ipv4_tunnel {
-       __be64 tun_id;
-       __be32 ipv4_src;
-       __be32 ipv4_dst;
-       __be16 tun_flags;
-       u8   ipv4_tos;
-       u8   ipv4_ttl;
-       __be16 tp_src;
-       __be16 tp_dst;
-} __packed __aligned(4); /* Minimize padding. */
-
-struct ovs_tunnel_info {
-       struct ovs_key_ipv4_tunnel tunnel;
-       const void *options;
-       u8 options_len;
-};
-
 /* Store options at the end of the array if they are less than the
  * maximum size. This allows us to get the benefits of variable length
  * matching for small options.
@@ -66,54 +46,9 @@ struct ovs_tunnel_info {
 #define TUN_METADATA_OPTS(flow_key, opt_len) \
        ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len)))
 
-static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-                                           __be32 saddr, __be32 daddr,
-                                           u8 tos, u8 ttl,
-                                           __be16 tp_src,
-                                           __be16 tp_dst,
-                                           __be64 tun_id,
-                                           __be16 tun_flags,
-                                           const void *opts,
-                                           u8 opts_len)
-{
-       tun_info->tunnel.tun_id = tun_id;
-       tun_info->tunnel.ipv4_src = saddr;
-       tun_info->tunnel.ipv4_dst = daddr;
-       tun_info->tunnel.ipv4_tos = tos;
-       tun_info->tunnel.ipv4_ttl = ttl;
-       tun_info->tunnel.tun_flags = tun_flags;
-
-       /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of
-        * the upper tunnel are used.
-        * E.g: GRE over IPSEC, the tp_src and tp_port are zero.
-        */
-       tun_info->tunnel.tp_src = tp_src;
-       tun_info->tunnel.tp_dst = tp_dst;
-
-       /* Clear struct padding. */
-       if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE)
-               memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE,
-                      0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
-
-       tun_info->options = opts;
-       tun_info->options_len = opts_len;
-}
-
-static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
-                                         const struct iphdr *iph,
-                                         __be16 tp_src,
-                                         __be16 tp_dst,
-                                         __be64 tun_id,
-                                         __be16 tun_flags,
-                                         const void *opts,
-                                         u8 opts_len)
-{
-       __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr,
-                                iph->tos, iph->ttl,
-                                tp_src, tp_dst,
-                                tun_id, tun_flags,
-                                opts, opts_len);
-}
+struct ovs_tunnel_info {
+       struct metadata_dst     *tun_dst;
+};
 
 #define OVS_SW_FLOW_KEY_METADATA_SIZE                  \
        (offsetof(struct sw_flow_key, recirc_id) +      \
@@ -122,7 +57,7 @@ static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
 struct sw_flow_key {
        u8 tun_opts[255];
        u8 tun_opts_len;
-       struct ovs_key_ipv4_tunnel tun_key;  /* Encapsulating tunnel key. */
+       struct ip_tunnel_key tun_key;   /* Encapsulating tunnel key. */
        struct {
                u32     priority;       /* Packet QoS priority. */
                u32     skb_mark;       /* SKB mark. */
@@ -273,7 +208,7 @@ void ovs_flow_stats_clear(struct sw_flow *);
 u64 ovs_flow_used_time(unsigned long flow_jiffies);
 
 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info,
+int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb,
                         struct sw_flow_key *key);
 /* Extract key from packet coming from userspace. */
index 624e41c4267fe0206fe94ede37e3160d80497abf..a6eb77ab1a6456768338a55290955bb29c69749a 100644 (file)
@@ -47,9 +47,9 @@
 #include <net/ipv6.h>
 #include <net/ndisc.h>
 #include <net/mpls.h>
+#include <net/vxlan.h>
 
 #include "flow_netlink.h"
-#include "vport-vxlan.h"
 
 struct ovs_len_tbl {
        int len;
@@ -475,7 +475,7 @@ static int vxlan_tun_opt_from_nlattr(const struct nlattr *a,
 {
        struct nlattr *tb[OVS_VXLAN_EXT_MAX+1];
        unsigned long opt_key_offset;
-       struct ovs_vxlan_opts opts;
+       struct vxlan_metadata opts;
        int err;
 
        BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
@@ -626,7 +626,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
                               const void *tun_opts, int swkey_tun_opts_len)
 {
-       const struct ovs_vxlan_opts *opts = tun_opts;
+       const struct vxlan_metadata *opts = tun_opts;
        struct nlattr *nla;
 
        nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
@@ -641,7 +641,7 @@ static int vxlan_opt_to_nlattr(struct sk_buff *skb,
 }
 
 static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
-                               const struct ovs_key_ipv4_tunnel *output,
+                               const struct ip_tunnel_key *output,
                                const void *tun_opts, int swkey_tun_opts_len)
 {
        if (output->tun_flags & TUNNEL_KEY &&
@@ -689,7 +689,7 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
 }
 
 static int ipv4_tun_to_nlattr(struct sk_buff *skb,
-                             const struct ovs_key_ipv4_tunnel *output,
+                             const struct ip_tunnel_key *output,
                              const void *tun_opts, int swkey_tun_opts_len)
 {
        struct nlattr *nla;
@@ -708,9 +708,9 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb,
 }
 
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb,
-                                 const struct ovs_tunnel_info *egress_tun_info)
+                                 const struct ip_tunnel_info *egress_tun_info)
 {
-       return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel,
+       return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
                                    egress_tun_info->options,
                                    egress_tun_info->options_len);
 }
@@ -1548,11 +1548,48 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
        return sfa;
 }
 
+static void ovs_nla_free_set_action(const struct nlattr *a)
+{
+       const struct nlattr *ovs_key = nla_data(a);
+       struct ovs_tunnel_info *ovs_tun;
+
+       switch (nla_type(ovs_key)) {
+       case OVS_KEY_ATTR_TUNNEL_INFO:
+               ovs_tun = nla_data(ovs_key);
+               dst_release((struct dst_entry *)ovs_tun->tun_dst);
+               break;
+       }
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+       const struct nlattr *a;
+       int rem;
+
+       if (!sf_acts)
+               return;
+
+       nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+               switch (nla_type(a)) {
+               case OVS_ACTION_ATTR_SET:
+                       ovs_nla_free_set_action(a);
+                       break;
+               }
+       }
+
+       kfree(sf_acts);
+}
+
+static void __ovs_nla_free_flow_actions(struct rcu_head *head)
+{
+       ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
+}
+
 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
  * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
 {
-       kfree_rcu(sf_acts, rcu);
+       call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
 }
 
 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
@@ -1746,7 +1783,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
 {
        struct sw_flow_match match;
        struct sw_flow_key key;
-       struct ovs_tunnel_info *tun_info;
+       struct metadata_dst *tun_dst;
+       struct ip_tunnel_info *tun_info;
+       struct ovs_tunnel_info *ovs_tun;
        struct nlattr *a;
        int err = 0, start, opts_type;
 
@@ -1771,13 +1810,23 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
        if (start < 0)
                return start;
 
+       tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
+       if (!tun_dst)
+               return -ENOMEM;
+
        a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
-                        sizeof(*tun_info) + key.tun_opts_len, log);
-       if (IS_ERR(a))
+                        sizeof(*ovs_tun), log);
+       if (IS_ERR(a)) {
+               dst_release((struct dst_entry *)tun_dst);
                return PTR_ERR(a);
+       }
+
+       ovs_tun = nla_data(a);
+       ovs_tun->tun_dst = tun_dst;
 
-       tun_info = nla_data(a);
-       tun_info->tunnel = key.tun_key;
+       tun_info = &tun_dst->u.tun_info;
+       tun_info->mode = IP_TUNNEL_INFO_TX;
+       tun_info->key = key.tun_key;
        tun_info->options_len = key.tun_opts_len;
 
        if (tun_info->options_len) {
@@ -2177,7 +2226,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
        err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type,
                                     key->eth.tci, log);
        if (err)
-               kfree(*sfa);
+               ovs_nla_free_flow_actions(*sfa);
 
        return err;
 }
@@ -2227,13 +2276,14 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
 
        switch (key_type) {
        case OVS_KEY_ATTR_TUNNEL_INFO: {
-               struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+               struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
+               struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
 
                start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
                if (!start)
                        return -EMSGSIZE;
 
-               err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+               err = ipv4_tun_to_nlattr(skb, &tun_info->key,
                                         tun_info->options_len ?
                                                tun_info->options : NULL,
                                         tun_info->options_len);
index 5c3d75bff3104a1ba0ea1b916900514245e9af2f..acd074408f0aa2167410e3dbf6a05052ff3b4a4b 100644 (file)
@@ -55,7 +55,7 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key,
                      const struct nlattr *mask, bool log);
 int ovs_nla_put_egress_tunnel_key(struct sk_buff *,
-                                 const struct ovs_tunnel_info *);
+                                 const struct ip_tunnel_info *);
 
 bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log);
 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
@@ -69,5 +69,6 @@ int ovs_nla_put_actions(const struct nlattr *attr,
                        int len, struct sk_buff *skb);
 
 void ovs_nla_free_flow_actions(struct sw_flow_actions *);
+void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *);
 
 #endif /* flow_netlink.h */
index 65523948fb95e7cf7843efd447c7fc62b0c51b71..3a9d1dde76ed3457bbe527053d95bd9ec774f100 100644 (file)
@@ -18,6 +18,7 @@
 
 #include "flow.h"
 #include "datapath.h"
+#include "flow_netlink.h"
 #include <linux/uaccess.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -143,7 +144,8 @@ static void flow_free(struct sw_flow *flow)
 
        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
-       kfree((struct sw_flow_actions __force *)flow->sf_acts);
+       if (flow->sf_acts)
+               ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
        for_each_node(node)
                if (flow->stats[node])
                        kmem_cache_free(flow_stats_cache,
index 208c576bd1b683d909f1d9c2e2c09de610057f55..1da3a14d10101f78881c392a06b49aa9e53c9a13 100644 (file)
@@ -77,7 +77,7 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
        struct vport *vport = gs->rcv_data;
        struct genevehdr *geneveh = geneve_hdr(skb);
        int opts_len;
-       struct ovs_tunnel_info tun_info;
+       struct ip_tunnel_info tun_info;
        __be64 key;
        __be16 flags;
 
@@ -90,10 +90,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
 
        key = vni_to_tunnel_id(geneveh->vni);
 
-       ovs_flow_tun_info_init(&tun_info, ip_hdr(skb),
-                              udp_hdr(skb)->source, udp_hdr(skb)->dest,
-                              key, flags,
-                              geneveh->options, opts_len);
+       ip_tunnel_info_init(&tun_info, ip_hdr(skb),
+                           udp_hdr(skb)->source, udp_hdr(skb)->dest,
+                           key, flags, geneveh->options, opts_len);
 
        ovs_vport_receive(vport, skb, &tun_info);
 }
@@ -165,8 +164,8 @@ error:
 
 static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
-       const struct ovs_key_ipv4_tunnel *tun_key;
-       struct ovs_tunnel_info *tun_info;
+       const struct ip_tunnel_key *tun_key;
+       struct ip_tunnel_info *tun_info;
        struct net *net = ovs_dp_get_net(vport->dp);
        struct geneve_port *geneve_port = geneve_vport(vport);
        __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
@@ -183,7 +182,7 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
                goto error;
        }
 
-       tun_key = &tun_info->tunnel;
+       tun_key = &tun_info->key;
        rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
@@ -225,7 +224,7 @@ static const char *geneve_get_name(const struct vport *vport)
 }
 
 static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                     struct ovs_tunnel_info *egress_tun_info)
+                                     struct ip_tunnel_info *egress_tun_info)
 {
        struct geneve_port *geneve_port = geneve_vport(vport);
        struct net *net = ovs_dp_get_net(vport->dp);
index f17ac9642f4ee3cca4ce9bece9bafa32de786621..b87656c66aaffe3b6ba905cbb84cdc0ec0b0e31b 100644 (file)
@@ -67,9 +67,9 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
                                      int tunnel_hlen)
 {
        struct tnl_ptk_info tpi;
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
+       tun_key = &OVS_CB(skb)->egress_tun_info->key;
 
        skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
        if (IS_ERR(skb))
@@ -97,7 +97,7 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
 static int gre_rcv(struct sk_buff *skb,
                   const struct tnl_ptk_info *tpi)
 {
-       struct ovs_tunnel_info tun_info;
+       struct ip_tunnel_info tun_info;
        struct ovs_net *ovs_net;
        struct vport *vport;
        __be64 key;
@@ -108,8 +108,8 @@ static int gre_rcv(struct sk_buff *skb,
                return PACKET_REJECT;
 
        key = key_to_tunnel_id(tpi->key, tpi->seq);
-       ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
-                              filter_tnl_flags(tpi->flags), NULL, 0);
+       ip_tunnel_info_init(&tun_info, ip_hdr(skb), 0, 0, key,
+                           filter_tnl_flags(tpi->flags), NULL, 0);
 
        ovs_vport_receive(vport, skb, &tun_info);
        return PACKET_RCVD;
@@ -134,7 +134,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
        struct flowi4 fl;
        struct rtable *rt;
        int min_headroom;
@@ -147,7 +147,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
                goto err_free_skb;
        }
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
+       tun_key = &OVS_CB(skb)->egress_tun_info->key;
        rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_GRE);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
@@ -277,7 +277,7 @@ static void gre_tnl_destroy(struct vport *vport)
 }
 
 static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                  struct ovs_tunnel_info *egress_tun_info)
+                                  struct ip_tunnel_info *egress_tun_info)
 {
        return ovs_tunnel_get_egress_info(egress_tun_info,
                                          ovs_dp_get_net(vport->dp),
index 6a55f71055051957685b11e8950ba1e4197dce20..c058bbf876c343691d1874e632926e80ac4b0130 100644 (file)
@@ -156,49 +156,44 @@ static void do_setup(struct net_device *netdev)
 static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
        struct vport *vport;
-       struct netdev_vport *netdev_vport;
        struct internal_dev *internal_dev;
        int err;
 
-       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-                               &ovs_internal_vport_ops, parms);
+       vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                goto error;
        }
 
-       netdev_vport = netdev_vport_priv(vport);
-
-       netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-                                        parms->name, NET_NAME_UNKNOWN,
-                                        do_setup);
-       if (!netdev_vport->dev) {
+       vport->dev = alloc_netdev(sizeof(struct internal_dev),
+                                 parms->name, NET_NAME_UNKNOWN, do_setup);
+       if (!vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
        }
 
-       dev_net_set(netdev_vport->dev, ovs_dp_get_net(vport->dp));
-       internal_dev = internal_dev_priv(netdev_vport->dev);
+       dev_net_set(vport->dev, ovs_dp_get_net(vport->dp));
+       internal_dev = internal_dev_priv(vport->dev);
        internal_dev->vport = vport;
 
        /* Restrict bridge port to current netns. */
        if (vport->port_no == OVSP_LOCAL)
-               netdev_vport->dev->features |= NETIF_F_NETNS_LOCAL;
+               vport->dev->features |= NETIF_F_NETNS_LOCAL;
 
        rtnl_lock();
-       err = register_netdevice(netdev_vport->dev);
+       err = register_netdevice(vport->dev);
        if (err)
                goto error_free_netdev;
 
-       dev_set_promiscuity(netdev_vport->dev, 1);
+       dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
-       netif_start_queue(netdev_vport->dev);
+       netif_start_queue(vport->dev);
 
        return vport;
 
 error_free_netdev:
        rtnl_unlock();
-       free_netdev(netdev_vport->dev);
+       free_netdev(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
 error:
@@ -207,21 +202,19 @@ error:
 
 static void internal_dev_destroy(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
-       netif_stop_queue(netdev_vport->dev);
+       netif_stop_queue(vport->dev);
        rtnl_lock();
-       dev_set_promiscuity(netdev_vport->dev, -1);
+       dev_set_promiscuity(vport->dev, -1);
 
        /* unregister_netdevice() waits for an RCU grace period. */
-       unregister_netdevice(netdev_vport->dev);
+       unregister_netdevice(vport->dev);
 
        rtnl_unlock();
 }
 
 static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
 {
-       struct net_device *netdev = netdev_vport_priv(vport)->dev;
+       struct net_device *netdev = vport->dev;
        int len;
 
        if (unlikely(!(netdev->flags & IFF_UP))) {
@@ -249,7 +242,6 @@ static struct vport_ops ovs_internal_vport_ops = {
        .type           = OVS_VPORT_TYPE_INTERNAL,
        .create         = internal_dev_create,
        .destroy        = internal_dev_destroy,
-       .get_name       = ovs_netdev_get_name,
        .send           = internal_dev_recv,
 };
 
index 33e6d6e2908f553516c5ca97c4b93abee7b7057b..cddb7069b11b7852093baa0efc67eddb73bf3a2b 100644 (file)
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
 #include <linux/openvswitch.h>
+#include <linux/export.h>
 
-#include <net/llc.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
+#include "vport.h"
 #include "vport-internal_dev.h"
 #include "vport-netdev.h"
 
@@ -54,7 +57,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
        skb_push(skb, ETH_HLEN);
        ovs_skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
 
-       ovs_vport_receive(vport, skb, NULL);
+       ovs_vport_receive(vport, skb, skb_tunnel_info(skb, AF_INET));
        return;
 
 error:
@@ -83,104 +86,96 @@ static struct net_device *get_dpdev(const struct datapath *dp)
 
        local = ovs_vport_ovsl(dp, OVSP_LOCAL);
        BUG_ON(!local);
-       return netdev_vport_priv(local)->dev;
+       return local->dev;
 }
 
-static struct vport *netdev_create(const struct vport_parms *parms)
+struct vport *ovs_netdev_link(struct vport *vport, const char *name)
 {
-       struct vport *vport;
-       struct netdev_vport *netdev_vport;
        int err;
 
-       vport = ovs_vport_alloc(sizeof(struct netdev_vport),
-                               &ovs_netdev_vport_ops, parms);
-       if (IS_ERR(vport)) {
-               err = PTR_ERR(vport);
-               goto error;
-       }
-
-       netdev_vport = netdev_vport_priv(vport);
-
-       netdev_vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), parms->name);
-       if (!netdev_vport->dev) {
+       vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
+       if (!vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
        }
 
-       if (netdev_vport->dev->flags & IFF_LOOPBACK ||
-           netdev_vport->dev->type != ARPHRD_ETHER ||
-           ovs_is_internal_dev(netdev_vport->dev)) {
+       if (vport->dev->flags & IFF_LOOPBACK ||
+           vport->dev->type != ARPHRD_ETHER ||
+           ovs_is_internal_dev(vport->dev)) {
                err = -EINVAL;
                goto error_put;
        }
 
        rtnl_lock();
-       err = netdev_master_upper_dev_link(netdev_vport->dev,
+       err = netdev_master_upper_dev_link(vport->dev,
                                           get_dpdev(vport->dp));
        if (err)
                goto error_unlock;
 
-       err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
+       err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
                                         vport);
        if (err)
                goto error_master_upper_dev_unlink;
 
-       dev_disable_lro(netdev_vport->dev);
-       dev_set_promiscuity(netdev_vport->dev, 1);
-       netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
+       dev_disable_lro(vport->dev);
+       dev_set_promiscuity(vport->dev, 1);
+       vport->dev->priv_flags |= IFF_OVS_DATAPATH;
        rtnl_unlock();
 
        return vport;
 
 error_master_upper_dev_unlink:
-       netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+       netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
 error_unlock:
        rtnl_unlock();
 error_put:
-       dev_put(netdev_vport->dev);
+       dev_put(vport->dev);
 error_free_vport:
        ovs_vport_free(vport);
-error:
        return ERR_PTR(err);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_link);
+
+static struct vport *netdev_create(const struct vport_parms *parms)
+{
+       struct vport *vport;
+
+       vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
+       if (IS_ERR(vport))
+               return vport;
+
+       return ovs_netdev_link(vport, parms->name);
+}
 
-static void free_port_rcu(struct rcu_head *rcu)
+void ovs_vport_free_rcu(struct rcu_head *rcu)
 {
-       struct netdev_vport *netdev_vport = container_of(rcu,
-                                       struct netdev_vport, rcu);
+       struct vport *vport = container_of(rcu, struct vport, rcu);
 
-       dev_put(netdev_vport->dev);
-       ovs_vport_free(vport_from_priv(netdev_vport));
+       if (vport->dev)
+               dev_put(vport->dev);
+       ovs_vport_free(vport);
 }
+EXPORT_SYMBOL_GPL(ovs_vport_free_rcu);
 
 void ovs_netdev_detach_dev(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
        ASSERT_RTNL();
-       netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
-       netdev_rx_handler_unregister(netdev_vport->dev);
-       netdev_upper_dev_unlink(netdev_vport->dev,
-                               netdev_master_upper_dev_get(netdev_vport->dev));
-       dev_set_promiscuity(netdev_vport->dev, -1);
+       vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
+       netdev_rx_handler_unregister(vport->dev);
+       netdev_upper_dev_unlink(vport->dev,
+                               netdev_master_upper_dev_get(vport->dev));
+       dev_set_promiscuity(vport->dev, -1);
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_detach_dev);
 
 static void netdev_destroy(struct vport *vport)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-
        rtnl_lock();
-       if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
                ovs_netdev_detach_dev(vport);
        rtnl_unlock();
 
-       call_rcu(&netdev_vport->rcu, free_port_rcu);
-}
-
-const char *ovs_netdev_get_name(const struct vport *vport)
-{
-       const struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-       return netdev_vport->dev->name;
+       call_rcu(&vport->rcu, ovs_vport_free_rcu);
 }
 
 static unsigned int packet_length(const struct sk_buff *skb)
@@ -193,20 +188,19 @@ static unsigned int packet_length(const struct sk_buff *skb)
        return length;
 }
 
-static int netdev_send(struct vport *vport, struct sk_buff *skb)
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb)
 {
-       struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
-       int mtu = netdev_vport->dev->mtu;
+       int mtu = vport->dev->mtu;
        int len;
 
        if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
                net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
-                                    netdev_vport->dev->name,
+                                    vport->dev->name,
                                     packet_length(skb), mtu);
                goto drop;
        }
 
-       skb->dev = netdev_vport->dev;
+       skb->dev = vport->dev;
        len = skb->len;
        dev_queue_xmit(skb);
 
@@ -216,6 +210,7 @@ drop:
        kfree_skb(skb);
        return 0;
 }
+EXPORT_SYMBOL_GPL(ovs_netdev_send);
 
 /* Returns null if this device is not attached to a datapath. */
 struct vport *ovs_netdev_get_vport(struct net_device *dev)
@@ -231,8 +226,7 @@ static struct vport_ops ovs_netdev_vport_ops = {
        .type           = OVS_VPORT_TYPE_NETDEV,
        .create         = netdev_create,
        .destroy        = netdev_destroy,
-       .get_name       = ovs_netdev_get_name,
-       .send           = netdev_send,
+       .send           = ovs_netdev_send,
 };
 
 int __init ovs_netdev_init(void)
index 6f7038e79c524cc66dc53188992b0ed9ff6c23ed..804412697a90c46f43214d2e80ac447c23aea6ae 100644 (file)
 
 struct vport *ovs_netdev_get_vport(struct net_device *dev);
 
-struct netdev_vport {
-       struct rcu_head rcu;
-
-       struct net_device *dev;
-};
-
-static inline struct netdev_vport *
-netdev_vport_priv(const struct vport *vport)
-{
-       return vport_priv(vport);
-}
-
-const char *ovs_netdev_get_name(const struct vport *);
+struct vport *ovs_netdev_link(struct vport *vport, const char *name);
+int ovs_netdev_send(struct vport *vport, struct sk_buff *skb);
 void ovs_netdev_detach_dev(struct vport *);
+void ovs_vport_free_rcu(struct rcu_head *);
 
 int __init ovs_netdev_init(void);
 void ovs_netdev_exit(void);
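
The net effect of this hunk pair is that the private struct netdev_vport wrapper disappears: the backing net_device now lives directly in struct vport, so every former netdev_vport_priv(vport)->dev access becomes a plain vport->dev. A minimal, hypothetical helper (not part of this series, assuming the OVS vport headers above) just to illustrate the new access pattern:

	/* Hypothetical example only: report the MTU of the device backing a
	 * vport.  Before this change the device had to be reached through
	 * netdev_vport_priv(vport)->dev; now it is one dereference away.
	 */
	static int example_vport_mtu(const struct vport *vport)
	{
		return vport->dev ? vport->dev->mtu : -ENODEV;
	}
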
index 6d39766e7828c4351a004fa8ce7d839348c3681d..547173336cd308567c6815170a7ff542710b5705 100644 (file)
  * 02110-1301, USA
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/net.h>
-#include <linux/rculist.h>
-#include <linux/udp.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
 #include <linux/module.h>
-
-#include <net/icmp.h>
-#include <net/ip.h>
 #include <net/udp.h>
 #include <net/ip_tunnels.h>
 #include <net/rtnetlink.h>
-#include <net/route.h>
-#include <net/dsfield.h>
-#include <net/inet_ecn.h>
-#include <net/net_namespace.h>
-#include <net/netns/generic.h>
 #include <net/vxlan.h>
 
 #include "datapath.h"
 #include "vport.h"
-#include "vport-vxlan.h"
-
-/**
- * struct vxlan_port - Keeps track of open UDP ports
- * @vs: vxlan_sock created for the port.
- * @name: vport name.
- */
-struct vxlan_port {
-       struct vxlan_sock *vs;
-       char name[IFNAMSIZ];
-       u32 exts; /* VXLAN_F_* in <net/vxlan.h> */
-};
+#include "vport-netdev.h"
 
-static struct vport_ops ovs_vxlan_vport_ops;
-
-static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
-{
-       return vport_priv(vport);
-}
-
-/* Called with rcu_read_lock and BH disabled. */
-static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
-                     struct vxlan_metadata *md)
-{
-       struct ovs_tunnel_info tun_info;
-       struct vxlan_port *vxlan_port;
-       struct vport *vport = vs->data;
-       struct iphdr *iph;
-       struct ovs_vxlan_opts opts = {
-               .gbp = md->gbp,
-       };
-       __be64 key;
-       __be16 flags;
-
-       flags = TUNNEL_KEY | (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0);
-       vxlan_port = vxlan_vport(vport);
-       if (vxlan_port->exts & VXLAN_F_GBP && md->gbp)
-               flags |= TUNNEL_VXLAN_OPT;
-
-       /* Save outer tunnel values */
-       iph = ip_hdr(skb);
-       key = cpu_to_be64(ntohl(md->vni) >> 8);
-       ovs_flow_tun_info_init(&tun_info, iph,
-                              udp_hdr(skb)->source, udp_hdr(skb)->dest,
-                              key, flags, &opts, sizeof(opts));
-
-       ovs_vport_receive(vport, skb, &tun_info);
-}
+static struct vport_ops ovs_vxlan_netdev_vport_ops;
 
 static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
 {
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
+       __be16 dst_port = vxlan->cfg.dst_port;
 
        if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
                return -EMSGSIZE;
 
-       if (vxlan_port->exts) {
+       if (vxlan->flags & VXLAN_F_GBP) {
                struct nlattr *exts;
 
                exts = nla_nest_start(skb, OVS_TUNNEL_ATTR_EXTENSION);
                if (!exts)
                        return -EMSGSIZE;
 
-               if (vxlan_port->exts & VXLAN_F_GBP &&
+               if (vxlan->flags & VXLAN_F_GBP &&
                    nla_put_flag(skb, OVS_VXLAN_EXT_GBP))
                        return -EMSGSIZE;
 
@@ -114,23 +57,14 @@ static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
        return 0;
 }
 
-static void vxlan_tnl_destroy(struct vport *vport)
-{
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-
-       vxlan_sock_release(vxlan_port->vs);
-
-       ovs_vport_deferred_free(vport);
-}
-
-static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX+1] = {
+static const struct nla_policy exts_policy[OVS_VXLAN_EXT_MAX + 1] = {
        [OVS_VXLAN_EXT_GBP]     = { .type = NLA_FLAG, },
 };
 
-static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
+static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr,
+                               struct vxlan_config *conf)
 {
-       struct nlattr *exts[OVS_VXLAN_EXT_MAX+1];
-       struct vxlan_port *vxlan_port;
+       struct nlattr *exts[OVS_VXLAN_EXT_MAX + 1];
        int err;
 
        if (nla_len(attr) < sizeof(struct nlattr))
@@ -140,10 +74,8 @@ static int vxlan_configure_exts(struct vport *vport, struct nlattr *attr)
        if (err < 0)
                return err;
 
-       vxlan_port = vxlan_vport(vport);
-
        if (exts[OVS_VXLAN_EXT_GBP])
-               vxlan_port->exts |= VXLAN_F_GBP;
+               conf->flags |= VXLAN_F_GBP;
 
        return 0;
 }
@@ -152,128 +84,89 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
 {
        struct net *net = ovs_dp_get_net(parms->dp);
        struct nlattr *options = parms->options;
-       struct vxlan_port *vxlan_port;
-       struct vxlan_sock *vs;
+       struct net_device *dev;
        struct vport *vport;
        struct nlattr *a;
-       u16 dst_port;
        int err;
+       struct vxlan_config conf = {
+               .no_share = true,
+               .flags = VXLAN_F_FLOW_BASED | VXLAN_F_COLLECT_METADATA,
+       };
 
        if (!options) {
                err = -EINVAL;
                goto error;
        }
+
        a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
        if (a && nla_len(a) == sizeof(u16)) {
-               dst_port = nla_get_u16(a);
+               conf.dst_port = htons(nla_get_u16(a));
        } else {
                /* Require destination port from userspace. */
                err = -EINVAL;
                goto error;
        }
 
-       vport = ovs_vport_alloc(sizeof(struct vxlan_port),
-                               &ovs_vxlan_vport_ops, parms);
+       vport = ovs_vport_alloc(0, &ovs_vxlan_netdev_vport_ops, parms);
        if (IS_ERR(vport))
                return vport;
 
-       vxlan_port = vxlan_vport(vport);
-       strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
-
        a = nla_find_nested(options, OVS_TUNNEL_ATTR_EXTENSION);
        if (a) {
-               err = vxlan_configure_exts(vport, a);
+               err = vxlan_configure_exts(vport, a, &conf);
                if (err) {
                        ovs_vport_free(vport);
                        goto error;
                }
        }
 
-       vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true,
-                           vxlan_port->exts);
-       if (IS_ERR(vs)) {
+       rtnl_lock();
+       dev = vxlan_dev_create(net, parms->name, NET_NAME_USER, &conf);
+       if (IS_ERR(dev)) {
+               rtnl_unlock();
                ovs_vport_free(vport);
-               return (void *)vs;
+               return ERR_CAST(dev);
        }
-       vxlan_port->vs = vs;
 
+       dev_change_flags(dev, dev->flags | IFF_UP);
+       rtnl_unlock();
        return vport;
-
 error:
        return ERR_PTR(err);
 }
 
-static int vxlan_ext_gbp(struct sk_buff *skb)
+static struct vport *vxlan_create(const struct vport_parms *parms)
 {
-       const struct ovs_tunnel_info *tun_info;
-       const struct ovs_vxlan_opts *opts;
+       struct vport *vport;
 
-       tun_info = OVS_CB(skb)->egress_tun_info;
-       opts = tun_info->options;
+       vport = vxlan_tnl_create(parms);
+       if (IS_ERR(vport))
+               return vport;
 
-       if (tun_info->tunnel.tun_flags & TUNNEL_VXLAN_OPT &&
-           tun_info->options_len >= sizeof(*opts))
-               return opts->gbp;
-       else
-               return 0;
+       return ovs_netdev_link(vport, parms->name);
 }
 
-static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+static void vxlan_destroy(struct vport *vport)
 {
-       struct net *net = ovs_dp_get_net(vport->dp);
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       struct sock *sk = vxlan_port->vs->sock->sk;
-       __be16 dst_port = inet_sk(sk)->inet_sport;
-       const struct ovs_key_ipv4_tunnel *tun_key;
-       struct vxlan_metadata md = {0};
-       struct rtable *rt;
-       struct flowi4 fl;
-       __be16 src_port;
-       __be16 df;
-       int err;
-       u32 vxflags;
+       rtnl_lock();
+       if (vport->dev->priv_flags & IFF_OVS_DATAPATH)
+               ovs_netdev_detach_dev(vport);
 
-       if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
-               err = -EINVAL;
-               goto error;
-       }
+       /* Early release so we can unregister the device */
+       dev_put(vport->dev);
+       rtnl_delete_link(vport->dev);
+       vport->dev = NULL;
+       rtnl_unlock();
 
-       tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
-       rt = ovs_tunnel_route_lookup(net, tun_key, skb->mark, &fl, IPPROTO_UDP);
-       if (IS_ERR(rt)) {
-               err = PTR_ERR(rt);
-               goto error;
-       }
-
-       df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
-               htons(IP_DF) : 0;
-
-       skb->ignore_df = 1;
-
-       src_port = udp_flow_src_port(net, skb, 0, 0, true);
-       md.vni = htonl(be64_to_cpu(tun_key->tun_id) << 8);
-       md.gbp = vxlan_ext_gbp(skb);
-       vxflags = vxlan_port->exts |
-                     (tun_key->tun_flags & TUNNEL_CSUM ? VXLAN_F_UDP_CSUM : 0);
-
-       err = vxlan_xmit_skb(rt, sk, skb, fl.saddr, tun_key->ipv4_dst,
-                            tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
-                            src_port, dst_port,
-                            &md, false, vxflags);
-       if (err < 0)
-               ip_rt_put(rt);
-       return err;
-error:
-       kfree_skb(skb);
-       return err;
+       call_rcu(&vport->rcu, ovs_vport_free_rcu);
 }
 
 static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                    struct ovs_tunnel_info *egress_tun_info)
+                                    struct ip_tunnel_info *egress_tun_info)
 {
+       struct vxlan_dev *vxlan = netdev_priv(vport->dev);
        struct net *net = ovs_dp_get_net(vport->dp);
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+       __be16 dst_port = vxlan_dev_dst_port(vxlan);
        __be16 src_port;
        int port_min;
        int port_max;
@@ -287,31 +180,23 @@ static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
                                          src_port, dst_port);
 }
 
-static const char *vxlan_get_name(const struct vport *vport)
-{
-       struct vxlan_port *vxlan_port = vxlan_vport(vport);
-       return vxlan_port->name;
-}
-
-static struct vport_ops ovs_vxlan_vport_ops = {
-       .type           = OVS_VPORT_TYPE_VXLAN,
-       .create         = vxlan_tnl_create,
-       .destroy        = vxlan_tnl_destroy,
-       .get_name       = vxlan_get_name,
-       .get_options    = vxlan_get_options,
-       .send           = vxlan_tnl_send,
+static struct vport_ops ovs_vxlan_netdev_vport_ops = {
+       .type                   = OVS_VPORT_TYPE_VXLAN,
+       .create                 = vxlan_create,
+       .destroy                = vxlan_destroy,
+       .get_options            = vxlan_get_options,
+       .send                   = ovs_netdev_send,
        .get_egress_tun_info    = vxlan_get_egress_tun_info,
-       .owner          = THIS_MODULE,
 };
 
 static int __init ovs_vxlan_tnl_init(void)
 {
-       return ovs_vport_ops_register(&ovs_vxlan_vport_ops);
+       return ovs_vport_ops_register(&ovs_vxlan_netdev_vport_ops);
 }
 
 static void __exit ovs_vxlan_tnl_exit(void)
 {
-       ovs_vport_ops_unregister(&ovs_vxlan_vport_ops);
+       ovs_vport_ops_unregister(&ovs_vxlan_netdev_vport_ops);
 }
 
 module_init(ovs_vxlan_tnl_init);
diff --git a/net/openvswitch/vport-vxlan.h b/net/openvswitch/vport-vxlan.h
deleted file mode 100644 (file)
index 4b08233..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef VPORT_VXLAN_H
-#define VPORT_VXLAN_H 1
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-struct ovs_vxlan_opts {
-       __u32 gbp;
-};
-
-#endif
index 067a3fff1d2cb0c629c1dc2d75d0353b9269ba71..d14f59403c5eb61cdde91cbe557617194756ccaf 100644 (file)
@@ -113,7 +113,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
        struct vport *vport;
 
        hlist_for_each_entry_rcu(vport, bucket, hash_node)
-               if (!strcmp(name, vport->ops->get_name(vport)) &&
+               if (!strcmp(name, ovs_vport_name(vport)) &&
                    net_eq(ovs_dp_get_net(vport->dp), net))
                        return vport;
 
@@ -226,7 +226,7 @@ struct vport *ovs_vport_add(const struct vport_parms *parms)
                }
 
                bucket = hash_bucket(ovs_dp_get_net(vport->dp),
-                                    vport->ops->get_name(vport));
+                                    ovs_vport_name(vport));
                hlist_add_head_rcu(&vport->hash_node, bucket);
                return vport;
        }
@@ -469,7 +469,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
  * skb->data should point to the Ethernet header.
  */
 void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
-                      const struct ovs_tunnel_info *tun_info)
+                      const struct ip_tunnel_info *tun_info)
 {
        struct pcpu_sw_netstats *stats;
        struct sw_flow_key key;
@@ -572,22 +572,22 @@ void ovs_vport_deferred_free(struct vport *vport)
 }
 EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
                               struct net *net,
-                              const struct ovs_tunnel_info *tun_info,
+                              const struct ip_tunnel_info *tun_info,
                               u8 ipproto,
                               u32 skb_mark,
                               __be16 tp_src,
                               __be16 tp_dst)
 {
-       const struct ovs_key_ipv4_tunnel *tun_key;
+       const struct ip_tunnel_key *tun_key;
        struct rtable *rt;
        struct flowi4 fl;
 
        if (unlikely(!tun_info))
                return -EINVAL;
 
-       tun_key = &tun_info->tunnel;
+       tun_key = &tun_info->key;
 
        /* Route lookup to get source IP address.
         * The process may need to be changed if the corresponding process
@@ -602,22 +602,22 @@ int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
        /* Generate egress_tun_info based on tun_info,
         * saddr, tp_src and tp_dst
         */
-       __ovs_flow_tun_info_init(egress_tun_info,
-                                fl.saddr, tun_key->ipv4_dst,
-                                tun_key->ipv4_tos,
-                                tun_key->ipv4_ttl,
-                                tp_src, tp_dst,
-                                tun_key->tun_id,
-                                tun_key->tun_flags,
-                                tun_info->options,
-                                tun_info->options_len);
+       __ip_tunnel_info_init(egress_tun_info,
+                             fl.saddr, tun_key->ipv4_dst,
+                             tun_key->ipv4_tos,
+                             tun_key->ipv4_ttl,
+                             tp_src, tp_dst,
+                             tun_key->tun_id,
+                             tun_key->tun_flags,
+                             tun_info->options,
+                             tun_info->options_len);
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);
 
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct ovs_tunnel_info *info)
+                                 struct ip_tunnel_info *info)
 {
        /* get_egress_tun_info() is only implemented on tunnel ports. */
        if (unlikely(!vport->ops->get_egress_tun_info))
index bc85331a6c60cae9182bd1348d35d81117cf2943..1a689c28b5a6356dc1ffe061c80fc537822a4e7b 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
+#include <net/route.h>
 
 #include "datapath.h"
 
@@ -58,15 +59,15 @@ u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
 
 int ovs_vport_send(struct vport *, struct sk_buff *);
 
-int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
+int ovs_tunnel_get_egress_info(struct ip_tunnel_info *egress_tun_info,
                               struct net *net,
-                              const struct ovs_tunnel_info *tun_info,
+                              const struct ip_tunnel_info *tun_info,
                               u8 ipproto,
                               u32 skb_mark,
                               __be16 tp_src,
                               __be16 tp_dst);
 int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
-                                 struct ovs_tunnel_info *info);
+                                 struct ip_tunnel_info *info);
 
 /* The following definitions are for implementers of vport devices: */
 
@@ -106,7 +107,7 @@ struct vport_portids {
  * @detach_list: list used for detaching vport in net-exit call.
  */
 struct vport {
-       struct rcu_head rcu;
+       struct net_device *dev;
        struct datapath *dp;
        struct vport_portids __rcu *upcall_portids;
        u16 port_no;
@@ -119,6 +120,7 @@ struct vport {
 
        struct vport_err_stats err_stats;
        struct list_head detach_list;
+       struct rcu_head rcu;
 };
 
 /**
@@ -176,7 +178,7 @@ struct vport_ops {
 
        int (*send)(struct vport *, struct sk_buff *);
        int (*get_egress_tun_info)(struct vport *, struct sk_buff *,
-                                  struct ovs_tunnel_info *);
+                                  struct ip_tunnel_info *);
 
        struct module *owner;
        struct list_head list;
@@ -226,7 +228,7 @@ static inline struct vport *vport_from_priv(void *priv)
 }
 
 void ovs_vport_receive(struct vport *, struct sk_buff *,
-                      const struct ovs_tunnel_info *);
+                      const struct ip_tunnel_info *);
 
 static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
                                      const void *start, unsigned int len)
@@ -235,11 +237,16 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
                skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
 }
 
+static inline const char *ovs_vport_name(struct vport *vport)
+{
+       return vport->dev ? vport->dev->name : vport->ops->get_name(vport);
+}
+
 int ovs_vport_ops_register(struct vport_ops *ops);
 void ovs_vport_ops_unregister(struct vport_ops *ops);
 
 static inline struct rtable *ovs_tunnel_route_lookup(struct net *net,
-                                                    const struct ovs_key_ipv4_tunnel *key,
+                                                    const struct ip_tunnel_key *key,
                                                     u32 mark,
                                                     struct flowi4 *fl,
                                                     u8 protocol)
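
With the dev pointer embedded in struct vport, the new ovs_vport_name() helper gives callers a single way to name a port: vports with a backing net_device report dev->name, everything else still falls back to ops->get_name(). A hedged usage sketch (hypothetical debug print, not in the patch):

	pr_debug("vport %s attached on port %u\n",
		 ovs_vport_name(vport), vport->port_no);
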
index ed458b315ef4153233f65fa2af0bb0db55225bf8..b5afe538bb88e9b97eec2faa4e93acbb16be4fa0 100644 (file)
@@ -518,13 +518,11 @@ static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 }
 
 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
-               int tx_ring,
                struct sk_buff_head *rb_queue)
 {
        struct tpacket_kbdq_core *pkc;
 
-       pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
-                       GET_PBDQC_FROM_RB(&po->rx_ring);
+       pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 
        spin_lock_bh(&rb_queue->lock);
        pkc->delete_blk_timer = 1;
@@ -4043,7 +4041,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        if (closing && (po->tp_version > TPACKET_V2)) {
                /* Because we don't support block-based V3 on tx-ring */
                if (!tx_ring)
-                       prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+                       prb_shutdown_retire_blk_timer(po, rb_queue);
        }
        release_sock(sk);
 
index 43ec92680ae8fe30d14496f5cf81a1baf53b49e3..b087087ccfa94a47b96f9778342d4d021cf4ed1a 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
+static void free_tcf(struct rcu_head *head)
+{
+       struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);
+
+       free_percpu(p->cpu_bstats);
+       free_percpu(p->cpu_qstats);
+       kfree(p);
+}
+
 void tcf_hash_destroy(struct tc_action *a)
 {
        struct tcf_common *p = a->priv;
@@ -41,7 +50,7 @@ void tcf_hash_destroy(struct tc_action *a)
         * gen_estimator est_timer() might access p->tcfc_lock
         * or bstats, wait a RCU grace period before freeing p
         */
-       kfree_rcu(p, tcfc_rcu);
+       call_rcu(&p->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
@@ -231,15 +240,16 @@ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
        if (est)
                gen_kill_estimator(&pc->tcfc_bstats,
                                   &pc->tcfc_rate_est);
-       kfree_rcu(pc, tcfc_rcu);
+       call_rcu(&pc->tcfc_rcu, free_tcf);
 }
 EXPORT_SYMBOL(tcf_hash_cleanup);
 
 int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
-                   int size, int bind)
+                   int size, int bind, bool cpustats)
 {
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+       int err = -ENOMEM;
 
        if (unlikely(!p))
                return -ENOMEM;
@@ -247,18 +257,32 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
        if (bind)
                p->tcfc_bindcnt = 1;
 
+       if (cpustats) {
+               p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
+               if (!p->cpu_bstats) {
+err1:
+                       kfree(p);
+                       return err;
+               }
+               p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+               if (!p->cpu_qstats) {
+err2:
+                       free_percpu(p->cpu_bstats);
+                       goto err1;
+               }
+       }
        spin_lock_init(&p->tcfc_lock);
        INIT_HLIST_NODE(&p->tcfc_head);
        p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
        p->tcfc_tm.install = jiffies;
        p->tcfc_tm.lastuse = jiffies;
        if (est) {
-               int err = gen_new_estimator(&p->tcfc_bstats, NULL,
-                                           &p->tcfc_rate_est,
-                                           &p->tcfc_lock, est);
+               err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
+                                       &p->tcfc_rate_est,
+                                       &p->tcfc_lock, est);
                if (err) {
-                       kfree(p);
-                       return err;
+                       free_percpu(p->cpu_qstats);
+                       goto err2;
                }
        }
 
@@ -616,10 +640,10 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        if (err < 0)
                goto errout;
 
-       if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
+       if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
                                     &p->tcfc_rate_est) < 0 ||
-           gnet_stats_copy_queue(&d, NULL,
+           gnet_stats_copy_queue(&d, p->cpu_qstats,
                                  &p->tcfc_qstats,
                                  p->tcfc_qstats.qlen) < 0)
                goto errout;
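
For action authors the visible change is the extra cpustats argument to tcf_hash_create(): passing true makes the core allocate per-CPU basic and queue counters (freed through the new free_tcf() RCU callback), which the action then updates locklessly in its fast path. A hedged sketch for a hypothetical action 'foo'; the helper calls are the same ones gact and mirred use later in this merge:

	/* init path: opt in to per-CPU stats */
	ret = tcf_hash_create(parm->index, est, a, sizeof(*f), bind, true);

	/* fast path: no spinlock, just bump the per-CPU counters */
	bstats_cpu_update(this_cpu_ptr(f->common.cpu_bstats), skb);
	if (action == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(f->common.cpu_qstats));
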
index d0edeb7a1950b9a4c087f67344af0402aaa550e1..aaae8e83bf189cc99fb624bf581979df7ce6fce5 100644 (file)
@@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
 
        if (!tcf_hash_check(parm->index, act, bind)) {
                ret = tcf_hash_create(parm->index, est, act,
-                                     sizeof(*prog), bind);
+                                     sizeof(*prog), bind, false);
                if (ret < 0)
                        goto destroy_fp;
 
index 295d14bd6c678c31b56219371df83d4ebe3b0a2c..f2b540220ad02f1f8e3b2add9c7477a334081c3d 100644 (file)
@@ -108,7 +108,8 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
        parm = nla_data(tb[TCA_CONNMARK_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*ci), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*ci),
+                                     bind, false);
                if (ret)
                        return ret;
 
index 4cd5cf1aedf8b14bc8a8fb0529db868ee74433fd..b07c535ba8e7c6f8dcbc52f4eb69cf4a1ab3d0c2 100644 (file)
@@ -62,7 +62,8 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
        parm = nla_data(tb[TCA_CSUM_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index 7fffc2272701adb42d109318a80e8fef6fc3289f..5c1b051707363e19a779fcca2de6ae38ba0239fe 100644 (file)
 #ifdef CONFIG_GACT_PROB
 static int gact_net_rand(struct tcf_gact *gact)
 {
-       if (!gact->tcfg_pval || prandom_u32() % gact->tcfg_pval)
+       smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+       if (prandom_u32() % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
 }
 
 static int gact_determ(struct tcf_gact *gact)
 {
-       if (!gact->tcfg_pval || gact->tcf_bstats.packets % gact->tcfg_pval)
+       u32 pack = atomic_inc_return(&gact->packets);
+
+       smp_rmb(); /* coupled with smp_wmb() in tcf_gact_init() */
+       if (pack % gact->tcfg_pval)
                return gact->tcf_action;
        return gact->tcfg_paction;
 }
@@ -85,7 +89,8 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+                                     bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
@@ -99,16 +104,19 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 
        gact = to_gact(a);
 
-       spin_lock_bh(&gact->tcf_lock);
+       ASSERT_RTNL();
        gact->tcf_action = parm->action;
 #ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
-               gact->tcfg_pval    = p_parm->pval;
+               gact->tcfg_pval    = max_t(u16, 1, p_parm->pval);
+               /* Make sure tcfg_pval is written before tcfg_ptype
+                * coupled with smp_rmb() in gact_net_rand() & gact_determ()
+                */
+               smp_wmb();
                gact->tcfg_ptype   = p_parm->ptype;
        }
 #endif
-       spin_unlock_bh(&gact->tcf_lock);
        if (ret == ACT_P_CREATED)
                tcf_hash_insert(a);
        return ret;
@@ -118,23 +126,21 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
                    struct tcf_result *res)
 {
        struct tcf_gact *gact = a->priv;
-       int action = TC_ACT_SHOT;
+       int action = READ_ONCE(gact->tcf_action);
 
-       spin_lock(&gact->tcf_lock);
 #ifdef CONFIG_GACT_PROB
-       if (gact->tcfg_ptype)
-               action = gact_rand[gact->tcfg_ptype](gact);
-       else
-               action = gact->tcf_action;
-#else
-       action = gact->tcf_action;
+       {
+       u32 ptype = READ_ONCE(gact->tcfg_ptype);
+
+       if (ptype)
+               action = gact_rand[ptype](gact);
+       }
 #endif
-       gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
-       gact->tcf_bstats.packets++;
+       bstats_cpu_update(this_cpu_ptr(gact->common.cpu_bstats), skb);
        if (action == TC_ACT_SHOT)
-               gact->tcf_qstats.drops++;
-       gact->tcf_tm.lastuse = jiffies;
-       spin_unlock(&gact->tcf_lock);
+               qstats_drop_inc(this_cpu_ptr(gact->common.cpu_qstats));
+
+       tcf_lastuse_update(&gact->tcf_tm);
 
        return action;
 }
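
The reason the old '!gact->tcfg_pval' guards can be dropped is the publication order the new barriers enforce: tcfg_pval is clamped to at least 1 and made visible before tcfg_ptype, so a reader that observes a non-zero tcfg_ptype is guaranteed to see a non-zero divisor. Restating the pairing from the hunks above (not new code):

	/* writer, tcf_gact_init() under RTNL */
	gact->tcfg_pval  = max_t(u16, 1, p_parm->pval);
	smp_wmb();                      /* pval visible before ptype */
	gact->tcfg_ptype = p_parm->ptype;

	/* reader, tcf_gact() -> gact_net_rand()/gact_determ() */
	ptype = READ_ONCE(gact->tcfg_ptype);
	if (ptype) {
		smp_rmb();              /* paired with the smp_wmb() above */
		/* ... prandom_u32() % gact->tcfg_pval is now safe ... */
	}
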
index cbc8dd7dd48a50e77fdafa7b8cf4041659995cbb..99c9cc1c7af9240f9df444ae158df4fa7f7f8c73 100644 (file)
@@ -114,7 +114,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                index = nla_get_u32(tb[TCA_IPT_INDEX]);
 
        if (!tcf_hash_check(index, a, bind) ) {
-               ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+               ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index a42a3b257226178eb5af04054a17813c04368613..19cd8904efa0a46b9d659f36322a78e4cbb64c38 100644 (file)
@@ -35,9 +35,11 @@ static LIST_HEAD(mirred_list);
 static void tcf_mirred_release(struct tc_action *a, int bind)
 {
        struct tcf_mirred *m = to_mirred(a);
+       struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+
        list_del(&m->tcfm_list);
-       if (m->tcfm_dev)
-               dev_put(m->tcfm_dev);
+       if (dev)
+               dev_put(dev);
 }
 
 static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -93,7 +95,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        if (!tcf_hash_check(parm->index, a, bind)) {
                if (dev == NULL)
                        return -EINVAL;
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*m),
+                                     bind, true);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
@@ -105,18 +108,18 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
        m = to_mirred(a);
 
-       spin_lock_bh(&m->tcf_lock);
+       ASSERT_RTNL();
        m->tcf_action = parm->action;
        m->tcfm_eaction = parm->eaction;
        if (dev != NULL) {
                m->tcfm_ifindex = parm->ifindex;
                if (ret != ACT_P_CREATED)
-                       dev_put(m->tcfm_dev);
+                       dev_put(rcu_dereference_protected(m->tcfm_dev, 1));
                dev_hold(dev);
-               m->tcfm_dev = dev;
+               rcu_assign_pointer(m->tcfm_dev, dev);
                m->tcfm_ok_push = ok_push;
        }
-       spin_unlock_bh(&m->tcf_lock);
+
        if (ret == ACT_P_CREATED) {
                list_add(&m->tcfm_list, &mirred_list);
                tcf_hash_insert(a);
@@ -131,20 +134,22 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_mirred *m = a->priv;
        struct net_device *dev;
        struct sk_buff *skb2;
+       int retval, err;
        u32 at;
-       int retval, err = 1;
 
-       spin_lock(&m->tcf_lock);
-       m->tcf_tm.lastuse = jiffies;
-       bstats_update(&m->tcf_bstats, skb);
+       tcf_lastuse_update(&m->tcf_tm);
+
+       bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
 
-       dev = m->tcfm_dev;
-       if (!dev) {
-               printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
+       rcu_read_lock();
+       retval = READ_ONCE(m->tcf_action);
+       dev = rcu_dereference(m->tcfm_dev);
+       if (unlikely(!dev)) {
+               pr_notice_once("tc mirred: target device is gone\n");
                goto out;
        }
 
-       if (!(dev->flags & IFF_UP)) {
+       if (unlikely(!(dev->flags & IFF_UP))) {
                net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
                                       dev->name);
                goto out;
@@ -152,7 +157,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        at = G_TC_AT(skb->tc_verd);
        skb2 = skb_clone(skb, GFP_ATOMIC);
-       if (skb2 == NULL)
+       if (!skb2)
                goto out;
 
        if (!(at & AT_EGRESS)) {
@@ -168,16 +173,13 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        skb2->dev = dev;
        err = dev_queue_xmit(skb2);
 
-out:
        if (err) {
-               m->tcf_qstats.overlimits++;
+out:
+               qstats_overlimit_inc(this_cpu_ptr(m->common.cpu_qstats));
                if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
                        retval = TC_ACT_SHOT;
-               else
-                       retval = m->tcf_action;
-       } else
-               retval = m->tcf_action;
-       spin_unlock(&m->tcf_lock);
+       }
+       rcu_read_unlock();
 
        return retval;
 }
@@ -216,14 +218,16 @@ static int mirred_device_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct tcf_mirred *m;
 
+       ASSERT_RTNL();
        if (event == NETDEV_UNREGISTER)
                list_for_each_entry(m, &mirred_list, tcfm_list) {
-                       spin_lock_bh(&m->tcf_lock);
-                       if (m->tcfm_dev == dev) {
+                       if (rcu_access_pointer(m->tcfm_dev) == dev) {
                                dev_put(dev);
-                               m->tcfm_dev = NULL;
+                               /* Note : no rcu grace period necessary, as
+                                * net_device is already rcu protected.
+                                */
+                               RCU_INIT_POINTER(m->tcfm_dev, NULL);
                        }
-                       spin_unlock_bh(&m->tcf_lock);
                }
 
        return NOTIFY_DONE;
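
mirred follows the same pattern: tcfm_dev becomes an RCU-managed pointer, so the per-action spinlock can go. Writers (init and the netdev notifier, both under RTNL) publish or clear the pointer, while the transmit fast path only takes rcu_read_lock(). Condensed from the hunks above:

	/* control path, RTNL held */
	rcu_assign_pointer(m->tcfm_dev, dev);        /* tcf_mirred_init() */
	RCU_INIT_POINTER(m->tcfm_dev, NULL);         /* NETDEV_UNREGISTER */

	/* data path */
	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (likely(dev && (dev->flags & IFF_UP)))
		/* clone skb and dev_queue_xmit() the copy */;
	rcu_read_unlock();
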
index 270a030d5fd099ee7b6f6d74d51b6015aa690647..5be0b3c1c5b0c9f17e3fbd4e1dc1c92c7a8e5aed 100644 (file)
@@ -55,7 +55,8 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        parm = nla_data(tb[TCA_NAT_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
index ff8b466a73f6c04510523843b800de6c0db4b093..e38a7701f154c97db2070b1e0b8b54fabdb8b0f3 100644 (file)
@@ -57,7 +57,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (!tcf_hash_check(parm->index, a, bind)) {
                if (!parm->nkeys)
                        return -EINVAL;
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*p),
+                                     bind, false);
                if (ret)
                        return ret;
                p = to_pedit(a);
index 6a8d9488613a76d9cb2bd03e932ef95487ca0745..d6b708d6afdf37e7c1af4e47873755fc84b1167f 100644 (file)
@@ -103,7 +103,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        defdata = nla_data(tb[TCA_DEF_DATA]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+                                     bind, false);
                if (ret)
                        return ret;
 
index fcfeeaf838beb9e75f07f7cbda7fb2b73237a17f..6751b5f8c046a59912b78762855e51af8e6f29e7 100644 (file)
@@ -99,7 +99,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*d),
+                                     bind, false);
                if (ret)
                        return ret;
 
index d735ecf0b1a78d3fac6ac80b95931cc6cf6caba0..796785e0bf96b0e65f598d3b2dad8256485d034a 100644 (file)
@@ -116,7 +116,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        action = parm->v_action;
 
        if (!tcf_hash_check(parm->index, a, bind)) {
-               ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind);
+               ret = tcf_hash_create(parm->index, est, a, sizeof(*v),
+                                     bind, false);
                if (ret)
                        return ret;
 
index ea611b21641241737223f34334c0189df00d11e7..4c85bd3a750cbb02c743779f28cbde6ceacb5ecf 100644 (file)
@@ -30,35 +30,16 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                               struct tcf_result *res)
 {
        struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
-       u32 classid;
-
-       classid = task_cls_state(current)->classid;
-
-       /*
-        * Due to the nature of the classifier it is required to ignore all
-        * packets originating from softirq context as accessing `current'
-        * would lead to false results.
-        *
-        * This test assumes that all callers of dev_queue_xmit() explicitely
-        * disable bh. Knowing this, it is possible to detect softirq based
-        * calls by looking at the number of nested bh disable calls because
-        * softirqs always disables bh.
-        */
-       if (in_serving_softirq()) {
-               /* If there is an sk_classid we'll use that. */
-               if (!skb->sk)
-                       return -1;
-               classid = skb->sk->sk_classid;
-       }
+       u32 classid = task_get_classid(skb);
 
        if (!classid)
                return -1;
-
        if (!tcf_em_tree_match(skb, &head->ematches, NULL))
                return -1;
 
        res->classid = classid;
        res->class = 0;
+
        return tcf_exts_exec(skb, &head->exts, res);
 }
 
index b8d73bca683cc25d946163d9e856d8ed332ad4d7..ffaeea63d47381c480a7c97e00c64c7a4eb6e80d 100644 (file)
@@ -186,7 +186,6 @@ struct qfq_sched {
 
        u64                     oldV, V;        /* Precise virtual times. */
        struct qfq_aggregate    *in_serv_agg;   /* Aggregate being served. */
-       u32                     num_active_agg; /* Num. of active aggregates */
        u32                     wsum;           /* weight sum */
        u32                     iwsum;          /* inverse weight sum */
 
index 59e80356672bdf89777265ae1f8c384792dfb98c..4345790ad3266c353eeac5398593c2a9ce4effda 100644 (file)
@@ -487,23 +487,35 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
         */
        rcu_read_lock();
        list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+               struct net_device *odev;
+
                if (!laddr->valid)
                        continue;
-               if ((laddr->state == SCTP_ADDR_SRC) &&
-                   (AF_INET == laddr->a.sa.sa_family)) {
-                       fl4->fl4_sport = laddr->a.v4.sin_port;
-                       flowi4_update_output(fl4,
-                                            asoc->base.sk->sk_bound_dev_if,
-                                            RT_CONN_FLAGS(asoc->base.sk),
-                                            daddr->v4.sin_addr.s_addr,
-                                            laddr->a.v4.sin_addr.s_addr);
-
-                       rt = ip_route_output_key(sock_net(sk), fl4);
-                       if (!IS_ERR(rt)) {
-                               dst = &rt->dst;
-                               goto out_unlock;
-                       }
-               }
+               if (laddr->state != SCTP_ADDR_SRC ||
+                   AF_INET != laddr->a.sa.sa_family)
+                       continue;
+
+               fl4->fl4_sport = laddr->a.v4.sin_port;
+               flowi4_update_output(fl4,
+                                    asoc->base.sk->sk_bound_dev_if,
+                                    RT_CONN_FLAGS(asoc->base.sk),
+                                    daddr->v4.sin_addr.s_addr,
+                                    laddr->a.v4.sin_addr.s_addr);
+
+               rt = ip_route_output_key(sock_net(sk), fl4);
+               if (IS_ERR(rt))
+                       continue;
+
+               /* Ensure the src address belongs to the output
+                * interface.
+                */
+               odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
+                                    false);
+               if (!odev || odev->ifindex != fl4->flowi4_oif)
+                       continue;
+
+               dst = &rt->dst;
+               break;
        }
 
 out_unlock:
index 3ee27b7704ffb95430541507e83973e9207f9672..d7eaa7354cf76148d1a2c9ee3af4fff9a24990fb 100644 (file)
@@ -853,7 +853,7 @@ nomem:
 
 /*
  * Respond to a normal COOKIE ACK chunk.
- * We are the side that is being asked for an association.
+ * We are the side that is asking for an association.
  *
  * RFC 2960 5.1 Normal Establishment of an Association
  *
index 9f2add3cba26e54eadc15aeea05c3db167a75665..33bafa2e703e299f3b423d1f95b2a21cf177c634 100644 (file)
@@ -910,13 +910,9 @@ static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
                if (switchdev_port_attr_get(dev, &attr))
                        return NULL;
 
-               if (nhsel > 0) {
-                       if (prev_attr.u.ppid.id_len != attr.u.ppid.id_len)
+               if (nhsel > 0 &&
+                   !netdev_phys_item_id_same(&prev_attr.u.ppid, &attr.u.ppid))
                                return NULL;
-                       if (memcmp(prev_attr.u.ppid.id, attr.u.ppid.id,
-                                  attr.u.ppid.id_len))
-                               return NULL;
-               }
 
                prev_attr = attr;
        }
@@ -1043,3 +1039,106 @@ void switchdev_fib_ipv4_abort(struct fib_info *fi)
        fi->fib_net->ipv4.fib_offload_disabled = true;
 }
 EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_abort);
+
+static bool switchdev_port_same_parent_id(struct net_device *a,
+                                         struct net_device *b)
+{
+       struct switchdev_attr a_attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+               .flags = SWITCHDEV_F_NO_RECURSE,
+       };
+       struct switchdev_attr b_attr = {
+               .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
+               .flags = SWITCHDEV_F_NO_RECURSE,
+       };
+
+       if (switchdev_port_attr_get(a, &a_attr) ||
+           switchdev_port_attr_get(b, &b_attr))
+               return false;
+
+       return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
+}
+
+static u32 switchdev_port_fwd_mark_get(struct net_device *dev,
+                                      struct net_device *group_dev)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+
+       netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+               if (lower_dev == dev)
+                       continue;
+               if (switchdev_port_same_parent_id(dev, lower_dev))
+                       return lower_dev->offload_fwd_mark;
+               return switchdev_port_fwd_mark_get(dev, lower_dev);
+       }
+
+       return dev->ifindex;
+}
+
+static void switchdev_port_fwd_mark_reset(struct net_device *group_dev,
+                                         u32 old_mark, u32 *reset_mark)
+{
+       struct net_device *lower_dev;
+       struct list_head *iter;
+
+       netdev_for_each_lower_dev(group_dev, lower_dev, iter) {
+               if (lower_dev->offload_fwd_mark == old_mark) {
+                       if (!*reset_mark)
+                               *reset_mark = lower_dev->ifindex;
+                       lower_dev->offload_fwd_mark = *reset_mark;
+               }
+               switchdev_port_fwd_mark_reset(lower_dev, old_mark, reset_mark);
+       }
+}
+
+/**
+ *     switchdev_port_fwd_mark_set - Set port offload forwarding mark
+ *
+ *     @dev: port device
+ *     @group_dev: containing device
+ *     @joining: true if dev is joining group; false if leaving group
+ *
+ *     An ungrouped port's offload mark is just its ifindex.  A grouped
+ *     port's (member of a bridge, for example) offload mark is the ifindex
+ *     of one of the ports in the group with the same parent (switch) ID.
+ *     Ports on the same device in the same group will have the same mark.
+ *
+ *     Example:
+ *
+ *             br0             ifindex=9
+ *               sw1p1         ifindex=2       mark=2
+ *               sw1p2         ifindex=3       mark=2
+ *               sw2p1         ifindex=4       mark=5
+ *               sw2p2         ifindex=5       mark=5
+ *
+ *     If sw2p2 leaves the bridge, we'll have:
+ *
+ *             br0             ifindex=9
+ *               sw1p1         ifindex=2       mark=2
+ *               sw1p2         ifindex=3       mark=2
+ *               sw2p1         ifindex=4       mark=4
+ *             sw2p2           ifindex=5       mark=5
+ */
+void switchdev_port_fwd_mark_set(struct net_device *dev,
+                                struct net_device *group_dev,
+                                bool joining)
+{
+       u32 mark = dev->ifindex;
+       u32 reset_mark = 0;
+
+       if (group_dev && joining) {
+               mark = switchdev_port_fwd_mark_get(dev, group_dev);
+       } else if (group_dev && !joining) {
+               if (dev->offload_fwd_mark == mark)
+                       /* Ohoh, this port was the mark reference port,
+                        * but it's leaving the group, so reset the
+                        * mark for the remaining ports in the group.
+                        */
+                       switchdev_port_fwd_mark_reset(group_dev, mark,
+                                                     &reset_mark);
+       }
+
+       dev->offload_fwd_mark = mark;
+}
+EXPORT_SYMBOL_GPL(switchdev_port_fwd_mark_set);
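
The kernel-doc above already walks through the bridge example; the stand-alone user-space model below (hypothetical, not kernel code) only re-derives the invariant it describes: ports whose parent switch ID matches end up sharing one mark, and an ungrouped port keeps its own ifindex. Which member's ifindex becomes the shared mark depends on the order the lower devices are walked, so the concrete numbers can differ from the example (which uses 5 for the sw2 group).

	#include <stdio.h>

	struct port { int ifindex; int parent_id; int mark; };

	/* Toy re-statement of the rule: a grouped port inherits the mark of
	 * the first already-marked port with the same parent (switch) ID,
	 * otherwise its mark is its own ifindex. */
	static void assign_marks(struct port *p, int n)
	{
		for (int i = 0; i < n; i++) {
			p[i].mark = p[i].ifindex;
			for (int j = 0; j < i; j++)
				if (p[j].parent_id == p[i].parent_id) {
					p[i].mark = p[j].mark;
					break;
				}
		}
	}

	int main(void)
	{
		struct port ports[] = {
			{ 2, 1, 0 },	/* sw1p1 */
			{ 3, 1, 0 },	/* sw1p2 */
			{ 4, 2, 0 },	/* sw2p1 */
			{ 5, 2, 0 },	/* sw2p2 */
		};

		assign_marks(ports, 4);
		for (int i = 0; i < 4; i++)
			printf("ifindex=%d mark=%d\n",
			       ports[i].ifindex, ports[i].mark);
		/* prints marks 2 2 4 4: both sw1 ports share one mark, both
		 * sw2 ports share another, which is the property the kernel
		 * relies on. */
		return 0;
	}
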
index a816382fc8af1b9efb016f888493ca4dcc65fe3b..8b010c976b2f7c8eba5f6fe1cadb516b4e0f7269 100644 (file)
@@ -316,6 +316,29 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
        }
 }
 
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
+{
+       u16 last = msg_last_bcast(hdr);
+       int mtyp = msg_type(hdr);
+
+       if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
+               return;
+       if (mtyp == STATE_MSG) {
+               tipc_bclink_update_link_state(n, last);
+               return;
+       }
+       /* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
+        * and transfer synch info in LINK_PROTOCOL messages.
+        */
+       if (tipc_node_is_up(n))
+               return;
+       if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
+               return;
+       n->bclink.last_sent = last;
+       n->bclink.last_in = last;
+       n->bclink.oos_state = 0;
+}
+
 /**
  * bclink_peek_nack - monitor retransmission requests sent by other nodes
  *
@@ -358,10 +381,9 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 
        /* Prepare clone of message for local node */
        skb = tipc_msg_reassemble(list);
-       if (unlikely(!skb)) {
-               __skb_queue_purge(list);
+       if (unlikely(!skb))
                return -EHOSTUNREACH;
-       }
+
        /* Broadcast to all nodes */
        if (likely(bclink)) {
                tipc_bclink_lock(net);
@@ -413,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
         * all nodes in the cluster don't ACK at the same time
         */
        if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-               tipc_link_proto_xmit(node->active_links[node->addr & 1],
+               tipc_link_proto_xmit(node_active_link(node, node->addr),
                                     STATE_MSG, 0, 0, 0, 0);
                tn->bcl->stats.sent_acks++;
        }
@@ -925,7 +947,6 @@ int tipc_bclink_init(struct net *net)
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->bearer_id = MAX_BEARERS;
        rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-       bcl->state = WORKING_WORKING;
        bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
        msg_set_prevnode(bcl->pmsg, tn->own_addr);
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
index 3c290a48f72037ece5eddfb55c55d501e7f61e67..d74c69bcf60bda5e04ddc61a988afd6fa94bee66 100644 (file)
@@ -133,5 +133,6 @@ void tipc_bclink_wakeup_users(struct net *net);
 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 void tipc_bclink_input(struct net *net);
+void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *msg);
 
 #endif
index 00bc0e6205326025212a85e1110bab48c208e73a..ce9f7bfc0b92444950f51893e87abbc426151eb6 100644 (file)
@@ -343,7 +343,7 @@ restart:
 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
 {
        pr_info("Resetting bearer <%s>\n", b_ptr->name);
-       tipc_link_delete_list(net, b_ptr->identity);
+       tipc_node_delete_links(net, b_ptr->identity);
        tipc_disc_reset(net, b_ptr);
        return 0;
 }
@@ -361,7 +361,7 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr)
        pr_info("Disabling bearer <%s>\n", b_ptr->name);
        b_ptr->media->disable_media(b_ptr);
 
-       tipc_link_delete_list(net, b_ptr->identity);
+       tipc_node_delete_links(net, b_ptr->identity);
        if (b_ptr->link_req)
                tipc_disc_delete(b_ptr->link_req);
 
@@ -470,6 +470,32 @@ void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
        rcu_read_unlock();
 }
 
+/* tipc_bearer_xmit() - send buffer to destination over bearer
+ */
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+                     struct sk_buff_head *xmitq,
+                     struct tipc_media_addr *dst)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_bearer *b;
+       struct sk_buff *skb, *tmp;
+
+       if (skb_queue_empty(xmitq))
+               return;
+
+       rcu_read_lock();
+       b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
+       if (likely(b)) {
+               skb_queue_walk_safe(xmitq, skb, tmp) {
+                       __skb_dequeue(xmitq);
+                       b->media->send_msg(net, skb, b, dst);
+                       /* Until we remove cloning in tipc_l2_send_msg(): */
+                       kfree_skb(skb);
+               }
+       }
+       rcu_read_unlock();
+}
+
 /**
  * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
  * @buf: the received packet
index dc714d977768c105cff0b774b49be1e5ec1c59fd..6426f242f6262e80594cd1cdc438c4a94f4c7026 100644 (file)
@@ -217,5 +217,8 @@ void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
 void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
                      struct tipc_media_addr *dest);
+void tipc_bearer_xmit(struct net *net, u32 bearer_id,
+                     struct sk_buff_head *xmitq,
+                     struct tipc_media_addr *dst);
 
 #endif /* _TIPC_BEARER_H */
index 0fcf133d5cb7cef0f33478412cb75809b68a8223..b96b41eabf121cc8577b65d0ad5bb0727ca5d3f7 100644 (file)
@@ -109,6 +109,11 @@ struct tipc_net {
        atomic_t subscription_count;
 };
 
+static inline struct tipc_net *tipc_net(struct net *net)
+{
+       return net_generic(net, tipc_net_id);
+}
+
 static inline u16 mod(u16 x)
 {
        return x & 0xffffu;
@@ -129,6 +134,11 @@ static inline int less(u16 left, u16 right)
        return less_eq(left, right) && (mod(right) != mod(left));
 }
 
+static inline int in_range(u16 val, u16 min, u16 max)
+{
+       return !less(val, min) && !more(val, max);
+}
+
 #ifdef CONFIG_SYSCTL
 int tipc_register_sysctl(void);
 void tipc_unregister_sysctl(void);
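
in_range() builds on the existing wrap-around aware helpers (mod()/less() above, plus less_eq()/more() defined elsewhere in core.h), so a 16-bit sequence number that has wrapped past 0xffff still compares as "after" a slightly smaller one. A small stand-alone illustration (user space; the less_eq()/more() bodies are assumed to follow the usual serial-number convention used by TIPC):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t mod(uint16_t x)            { return x & 0xffffu; }
	static int less_eq(uint16_t l, uint16_t r) { return mod((uint16_t)(r - l)) < 32768u; }
	static int less(uint16_t l, uint16_t r)    { return less_eq(l, r) && (mod(r) != mod(l)); }
	static int more(uint16_t l, uint16_t r)    { return less(r, l); }
	static int in_range(uint16_t v, uint16_t lo, uint16_t hi)
	{
		return !less(v, lo) && !more(v, hi);
	}

	int main(void)
	{
		/* 0x0001 has wrapped, but it is still "after" 0xfffd and
		 * therefore inside the window [0xfffd, 0x0003]. */
		printf("%d\n", in_range(0x0001, 0xfffd, 0x0003));	/* 1 */
		printf("%d\n", in_range(0x8000, 0xfffd, 0x0003));	/* 0 */
		return 0;
	}
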
index 967e292f53c89182bc0ed128b1dacd51fe02d090..d14e0a4aa9af900a7ace6855ab91eb2bcc901641 100644 (file)
@@ -35,7 +35,7 @@
  */
 
 #include "core.h"
-#include "link.h"
+#include "node.h"
 #include "discover.h"
 
 /* min delay during bearer start up */
@@ -120,30 +120,24 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
  * @buf: buffer containing message
  * @bearer: bearer that message arrived on
  */
-void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
+void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
                   struct tipc_bearer *bearer)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_node *node;
-       struct tipc_link *link;
        struct tipc_media_addr maddr;
-       struct sk_buff *rbuf;
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 ddom = msg_dest_domain(msg);
-       u32 onode = msg_prevnode(msg);
-       u32 net_id = msg_bc_netid(msg);
-       u32 mtyp = msg_type(msg);
-       u32 signature = msg_node_sig(msg);
-       u16 caps = msg_node_capabilities(msg);
-       bool addr_match = false;
-       bool sign_match = false;
-       bool link_up = false;
-       bool accept_addr = false;
-       bool accept_sign = false;
+       struct sk_buff *rskb;
+       struct tipc_msg *hdr = buf_msg(skb);
+       u32 ddom = msg_dest_domain(hdr);
+       u32 onode = msg_prevnode(hdr);
+       u32 net_id = msg_bc_netid(hdr);
+       u32 mtyp = msg_type(hdr);
+       u32 signature = msg_node_sig(hdr);
+       u16 caps = msg_node_capabilities(hdr);
        bool respond = false;
+       bool dupl_addr = false;
 
-       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
-       kfree_skb(buf);
+       bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
+       kfree_skb(skb);
 
        /* Ensure message from node is valid and communication is permitted */
        if (net_id != tn->net_id)
@@ -165,102 +159,20 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
        if (!tipc_in_scope(bearer->domain, onode))
                return;
 
-       node = tipc_node_create(net, onode);
-       if (!node)
-               return;
-       tipc_node_lock(node);
-       node->capabilities = caps;
-       link = node->links[bearer->identity];
-
-       /* Prepare to validate requesting node's signature and media address */
-       sign_match = (signature == node->signature);
-       addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
-       link_up = link && tipc_link_is_up(link);
-
-
-       /* These three flags give us eight permutations: */
-
-       if (sign_match && addr_match && link_up) {
-               /* All is fine. Do nothing. */
-       } else if (sign_match && addr_match && !link_up) {
-               /* Respond. The link will come up in due time */
-               respond = true;
-       } else if (sign_match && !addr_match && link_up) {
-               /* Peer has changed i/f address without rebooting.
-                * If so, the link will reset soon, and the next
-                * discovery will be accepted. So we can ignore it.
-                * It may also be an cloned or malicious peer having
-                * chosen the same node address and signature as an
-                * existing one.
-                * Ignore requests until the link goes down, if ever.
-                */
-               disc_dupl_alert(bearer, onode, &maddr);
-       } else if (sign_match && !addr_match && !link_up) {
-               /* Peer link has changed i/f address without rebooting.
-                * It may also be a cloned or malicious peer; we can't
-                * distinguish between the two.
-                * The signature is correct, so we must accept.
-                */
-               accept_addr = true;
-               respond = true;
-       } else if (!sign_match && addr_match && link_up) {
-               /* Peer node rebooted. Two possibilities:
-                *  - Delayed re-discovery; this link endpoint has already
-                *    reset and re-established contact with the peer, before
-                *    receiving a discovery message from that node.
-                *    (The peer happened to receive one from this node first).
-                *  - The peer came back so fast that our side has not
-                *    discovered it yet. Probing from this side will soon
-                *    reset the link, since there can be no working link
-                *    endpoint at the peer end, and the link will re-establish.
-                *  Accept the signature, since it comes from a known peer.
-                */
-               accept_sign = true;
-       } else if (!sign_match && addr_match && !link_up) {
-               /*  The peer node has rebooted.
-                *  Accept signature, since it is a known peer.
-                */
-               accept_sign = true;
-               respond = true;
-       } else if (!sign_match && !addr_match && link_up) {
-               /* Peer rebooted with new address, or a new/duplicate peer.
-                * Ignore until the link goes down, if ever.
-                */
+       tipc_node_check_dest(net, onode, bearer, caps, signature,
+                            &maddr, &respond, &dupl_addr);
+       if (dupl_addr)
                disc_dupl_alert(bearer, onode, &maddr);
-       } else if (!sign_match && !addr_match && !link_up) {
-               /* Peer rebooted with new address, or it is a new peer.
-                * Accept signature and address.
-               */
-               accept_sign = true;
-               accept_addr = true;
-               respond = true;
-       }
-
-       if (accept_sign)
-               node->signature = signature;
-
-       if (accept_addr) {
-               if (!link)
-                       link = tipc_link_create(node, bearer, &maddr);
-               if (link) {
-                       memcpy(&link->media_addr, &maddr, sizeof(maddr));
-                       tipc_link_reset(link);
-               } else {
-                       respond = false;
-               }
-       }
 
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
-               rbuf = tipc_buf_acquire(MAX_H_SIZE);
-               if (rbuf) {
-                       tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
-                       tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
-                       kfree_skb(rbuf);
+               rskb = tipc_buf_acquire(MAX_H_SIZE);
+               if (rskb) {
+                       tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
+                       tipc_bearer_send(net, bearer->identity, rskb, &maddr);
+                       kfree_skb(rskb);
                }
        }
-       tipc_node_unlock(node);
-       tipc_node_put(node);
 }
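
The eight-permutation analysis deleted above now hides behind tipc_node_check_dest(), which reports back through two booleans. Reduced to its observable outcomes, the deleted table computed exactly this (whether node.c derives it the same way is not shown in this diff, and the accept_sign/accept_addr side effects moved there as well):

        dupl_addr = link_up && !addr_match;     /* alert and ignore     */
        respond   = !link_up;                   /* answer a DSC_REQ_MSG */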
 
 /**
index eaa9fe54b4aebfb531610611637915dc1b0c7256..f067e5425560fe0d43c184589a397d614c12573b 100644 (file)
@@ -48,9 +48,8 @@
 /*
  * Error message prefixes
  */
-static const char *link_co_err = "Link changeover error, ";
+static const char *link_co_err = "Link tunneling error, ";
 static const char *link_rst_msg = "Resetting link ";
-static const char *link_unk_evt = "Unknown link event ";
 
 static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
        [TIPC_NLA_LINK_UNSPEC]          = { .type = NLA_UNSPEC },
@@ -76,257 +75,414 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 }
 };
 
+/*
+ * Interval between NACKs when packets arrive out of order
+ */
+#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
 /*
  * Out-of-range value for link session numbers
  */
-#define INVALID_SESSION 0x10000
+#define WILDCARD_SESSION 0x10000
 
-/*
- * Link state events:
+/* Link FSM states:
  */
-#define  STARTING_EVT    856384768     /* link processing trigger */
-#define  TRAFFIC_MSG_EVT 560815u       /* rx'd ??? */
-#define  SILENCE_EVT     560817u       /* timer dicovered silence from peer */
+enum {
+       LINK_ESTABLISHED     = 0xe,
+       LINK_ESTABLISHING    = 0xe  << 4,
+       LINK_RESET           = 0x1  << 8,
+       LINK_RESETTING       = 0x2  << 12,
+       LINK_PEER_RESET      = 0xd  << 16,
+       LINK_FAILINGOVER     = 0xf  << 20,
+       LINK_SYNCHING        = 0xc  << 24
+};
 
-/*
- * State value stored in 'failover_pkts'
+/* Link FSM state checking routines
  */
-#define FIRST_FAILOVER 0xffffu
-
-static void link_handle_out_of_seq_msg(struct tipc_link *link,
-                                      struct sk_buff *skb);
-static void tipc_link_proto_rcv(struct tipc_link *link,
-                               struct sk_buff *skb);
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
-static void link_state_event(struct tipc_link *l_ptr, u32 event);
+static int link_is_up(struct tipc_link *l)
+{
+       return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
+}
+
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+                              struct sk_buff_head *xmitq);
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+                                     u16 rcvgap, int tolerance, int priority,
+                                     struct sk_buff_head *xmitq);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
-static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
-static void link_set_timer(struct tipc_link *link, unsigned long time);
+
 /*
- *  Simple link routines
+ *  Simple non-static link routines (i.e. referenced outside this file)
  */
-static unsigned int align(unsigned int i)
+bool tipc_link_is_up(struct tipc_link *l)
 {
-       return (i + 3) & ~3u;
+       return link_is_up(l);
 }
 
-static void tipc_link_release(struct kref *kref)
+bool tipc_link_is_reset(struct tipc_link *l)
 {
-       kfree(container_of(kref, struct tipc_link, ref));
+       return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
 }
 
-static void tipc_link_get(struct tipc_link *l_ptr)
+bool tipc_link_is_synching(struct tipc_link *l)
 {
-       kref_get(&l_ptr->ref);
+       return l->state == LINK_SYNCHING;
 }
 
-static void tipc_link_put(struct tipc_link *l_ptr)
+bool tipc_link_is_failingover(struct tipc_link *l)
 {
-       kref_put(&l_ptr->ref, tipc_link_release);
+       return l->state == LINK_FAILINGOVER;
 }
 
-static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
+bool tipc_link_is_blocked(struct tipc_link *l)
 {
-       if (l->owner->active_links[0] != l)
-               return l->owner->active_links[0];
-       return l->owner->active_links[1];
+       return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
 }
 
-/*
- *  Simple non-static link routines (i.e. referenced outside this file)
- */
-int tipc_link_is_up(struct tipc_link *l_ptr)
+int tipc_link_is_active(struct tipc_link *l)
 {
-       if (!l_ptr)
-               return 0;
-       return link_working_working(l_ptr) || link_working_unknown(l_ptr);
+       struct tipc_node *n = l->owner;
+
+       return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
 }
 
-int tipc_link_is_active(struct tipc_link *l_ptr)
+static u32 link_own_addr(struct tipc_link *l)
 {
-       return  (l_ptr->owner->active_links[0] == l_ptr) ||
-               (l_ptr->owner->active_links[1] == l_ptr);
+       return msg_prevnode(l->pmsg);
 }
 
 /**
- * link_timeout - handle expiration of link timer
- * @l_ptr: pointer to link
+ * tipc_link_create - create a new link
+ * @n: pointer to associated node
+ * @b: pointer to associated bearer
+ * @ownnode: identity of own node
+ * @peer: identity of peer node
+ * @maddr: media address to be used
+ * @inputq: queue to put messages ready for delivery
+ * @namedq: queue to put binding table update messages ready for delivery
+ * @link: return value, pointer to put the created link
+ *
+ * Returns true if link was created, otherwise false
  */
-static void link_timeout(unsigned long data)
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+                     struct tipc_link **link)
 {
-       struct tipc_link *l_ptr = (struct tipc_link *)data;
-       struct sk_buff *skb;
+       struct tipc_link *l;
+       struct tipc_msg *hdr;
+       char *if_name;
+
+       l = kzalloc(sizeof(*l), GFP_ATOMIC);
+       if (!l)
+               return false;
+       *link = l;
+
+       /* Note: peer i/f name is completed by reset/activate message */
+       if_name = strchr(b->name, ':') + 1;
+       sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
+               tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
+               if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+
+       l->addr = peer;
+       l->media_addr = maddr;
+       l->owner = n;
+       l->peer_session = WILDCARD_SESSION;
+       l->bearer_id = b->identity;
+       l->tolerance = b->tolerance;
+       l->net_plane = b->net_plane;
+       l->advertised_mtu = b->mtu;
+       l->mtu = b->mtu;
+       l->priority = b->priority;
+       tipc_link_set_queue_limits(l, b->window);
+       l->inputq = inputq;
+       l->namedq = namedq;
+       l->state = LINK_RESETTING;
+       l->pmsg = (struct tipc_msg *)&l->proto_msg;
+       hdr = l->pmsg;
+       tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
+       msg_set_size(hdr, sizeof(l->proto_msg));
+       msg_set_session(hdr, session);
+       msg_set_bearer_id(hdr, l->bearer_id);
+       strcpy((char *)msg_data(hdr), if_name);
+       __skb_queue_head_init(&l->transmq);
+       __skb_queue_head_init(&l->backlogq);
+       __skb_queue_head_init(&l->deferdq);
+       skb_queue_head_init(&l->wakeupq);
+       skb_queue_head_init(l->inputq);
+       return true;
+}
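
Creation is now a bool-returning, allocation-only step; attaching the link to the node and arming timers are the caller's job. Note also that l->media_addr now stores the passed-in pointer where the old code took a copy, so @maddr must outlive the link. A hedged usage sketch, where peer_media_addr() is hypothetical and the session value mirrors the tn->random & 0xffff seed the deleted code used:

        struct tipc_link *l;
        struct sk_buff_head inputq, namedq;
        struct tipc_media_addr *maddr = peer_media_addr(b); /* hypothetical;
                                                * must outlive the link */

        skb_queue_head_init(&inputq);
        skb_queue_head_init(&namedq);
        if (!tipc_link_create(n, b, tn->random & 0xffff, tn->own_addr,
                              peer, maddr, &inputq, &namedq, &l))
                return -ENOMEM;         /* kzalloc() is the only failure */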
 
-       tipc_node_lock(l_ptr->owner);
+/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
+ *
+ * Give a newly added peer node the sequence number where it should
+ * start receiving and acking broadcast packets.
+ */
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb;
+       struct sk_buff_head list;
+       u16 last_sent;
 
-       /* update counters used in statistical profiling of send traffic */
-       l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
-       l_ptr->stats.queue_sz_counts++;
+       skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
+                             0, l->addr, link_own_addr(l), 0, 0, 0);
+       if (!skb)
+               return;
+       last_sent = tipc_bclink_get_last_sent(l->owner->net);
+       msg_set_last_bcast(buf_msg(skb), last_sent);
+       __skb_queue_head_init(&list);
+       __skb_queue_tail(&list, skb);
+       tipc_link_xmit(l, &list, xmitq);
+}
 
-       skb = skb_peek(&l_ptr->transmq);
-       if (skb) {
-               struct tipc_msg *msg = buf_msg(skb);
-               u32 length = msg_size(msg);
+/**
+ * tipc_link_fsm_evt - link finite state machine
+ * @l: pointer to link
+ * @evt: state machine event to be processed
+ */
+int tipc_link_fsm_evt(struct tipc_link *l, int evt)
+{
+       int rc = 0;
 
-               if ((msg_user(msg) == MSG_FRAGMENTER) &&
-                   (msg_type(msg) == FIRST_FRAGMENT)) {
-                       length = msg_size(msg_get_wrapped(msg));
+       switch (l->state) {
+       case LINK_RESETTING:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_FAILURE_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILOVER_END_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_RESET:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_ESTABLISHING;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+                       l->state = LINK_FAILINGOVER;
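+                       /* fall through: the next labels share the break */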
+               case LINK_FAILURE_EVT:
+               case LINK_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILOVER_END_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_PEER_RESET:
+               switch (evt) {
+               case LINK_RESET_EVT:
+                       l->state = LINK_ESTABLISHING;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILURE_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_FAILINGOVER:
+               switch (evt) {
+               case LINK_FAILOVER_END_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_RESET_EVT:
+               case LINK_ESTABLISH_EVT:
+               case LINK_FAILURE_EVT:
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_ESTABLISHING:
+               switch (evt) {
+               case LINK_ESTABLISH_EVT:
+                       l->state = LINK_ESTABLISHED;
+                       rc |= TIPC_LINK_UP_EVT;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+                       l->state = LINK_FAILINGOVER;
+                       break;
+               case LINK_PEER_RESET_EVT:
+               case LINK_RESET_EVT:
+               case LINK_FAILURE_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+                       break;
+               case LINK_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case LINK_ESTABLISHED:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_FAILURE_EVT:
+                       l->state = LINK_RESETTING;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_ESTABLISH_EVT:
+                       break;
+               case LINK_SYNCH_BEGIN_EVT:
+                       l->state = LINK_SYNCHING;
+                       break;
+               case LINK_SYNCH_END_EVT:
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
                }
-               if (length) {
-                       l_ptr->stats.msg_lengths_total += length;
-                       l_ptr->stats.msg_length_counts++;
-                       if (length <= 64)
-                               l_ptr->stats.msg_length_profile[0]++;
-                       else if (length <= 256)
-                               l_ptr->stats.msg_length_profile[1]++;
-                       else if (length <= 1024)
-                               l_ptr->stats.msg_length_profile[2]++;
-                       else if (length <= 4096)
-                               l_ptr->stats.msg_length_profile[3]++;
-                       else if (length <= 16384)
-                               l_ptr->stats.msg_length_profile[4]++;
-                       else if (length <= 32768)
-                               l_ptr->stats.msg_length_profile[5]++;
-                       else
-                               l_ptr->stats.msg_length_profile[6]++;
+               break;
+       case LINK_SYNCHING:
+               switch (evt) {
+               case LINK_PEER_RESET_EVT:
+                       l->state = LINK_PEER_RESET;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_FAILURE_EVT:
+                       l->state = LINK_RESETTING;
+                       rc |= TIPC_LINK_DOWN_EVT;
+                       break;
+               case LINK_RESET_EVT:
+                       l->state = LINK_RESET;
+                       break;
+               case LINK_ESTABLISH_EVT:
+               case LINK_SYNCH_BEGIN_EVT:
+                       break;
+               case LINK_SYNCH_END_EVT:
+                       l->state = LINK_ESTABLISHED;
+                       break;
+               case LINK_FAILOVER_BEGIN_EVT:
+               case LINK_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
                }
+               break;
+       default:
+               pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
-
-       /* do all other link processing performed on a periodic basis */
-       if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
-               link_state_event(l_ptr, SILENCE_EVT);
-       l_ptr->silent_intv_cnt++;
-       if (skb_queue_len(&l_ptr->backlogq))
-               tipc_link_push_packets(l_ptr);
-       link_set_timer(l_ptr, l_ptr->keepalive_intv);
-       tipc_node_unlock(l_ptr->owner);
-       tipc_link_put(l_ptr);
-}
-
-static void link_set_timer(struct tipc_link *link, unsigned long time)
-{
-       if (!mod_timer(&link->timer, jiffies + time))
-               tipc_link_get(link);
+       return rc;
+illegal_evt:
+       pr_err("Illegal FSM event %x in state %x on link %s\n",
+              evt, l->state, l->name);
+       return rc;
 }
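
Read as a transition table, the FSM above makes normal lifecycles easy to trace. One walk-through of a clean bring-up, starting from the LINK_RESETTING state that tipc_link_create() assigns:

        int rc;

        tipc_link_fsm_evt(l, LINK_RESET_EVT);       /* -> LINK_RESET        */
        tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);  /* -> LINK_ESTABLISHING */
        rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
        /* -> LINK_ESTABLISHED; rc now carries TIPC_LINK_UP_EVT */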
 
-/**
- * tipc_link_create - create a new link
- * @n_ptr: pointer to associated node
- * @b_ptr: pointer to associated bearer
- * @media_addr: media address to use when sending messages over link
- *
- * Returns pointer to link.
+/* link_profile_stats - update statistical profiling of traffic
  */
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-                                  struct tipc_bearer *b_ptr,
-                                  const struct tipc_media_addr *media_addr)
+static void link_profile_stats(struct tipc_link *l)
 {
-       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-       struct tipc_link *l_ptr;
+       struct sk_buff *skb;
        struct tipc_msg *msg;
-       char *if_name;
-       char addr_string[16];
-       u32 peer = n_ptr->addr;
+       int length;
 
-       if (n_ptr->link_cnt >= MAX_BEARERS) {
-               tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
-                      n_ptr->link_cnt, addr_string, MAX_BEARERS);
-               return NULL;
-       }
+       /* Update counters used in statistical profiling of send traffic */
+       l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
+       l->stats.queue_sz_counts++;
 
-       if (n_ptr->links[b_ptr->identity]) {
-               tipc_addr_string_fill(addr_string, n_ptr->addr);
-               pr_err("Attempt to establish second link on <%s> to %s\n",
-                      b_ptr->name, addr_string);
-               return NULL;
-       }
+       skb = skb_peek(&l->transmq);
+       if (!skb)
+               return;
+       msg = buf_msg(skb);
+       length = msg_size(msg);
 
-       l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
-       if (!l_ptr) {
-               pr_warn("Link creation failed, no memory\n");
-               return NULL;
+       if (msg_user(msg) == MSG_FRAGMENTER) {
+               if (msg_type(msg) != FIRST_FRAGMENT)
+                       return;
+               length = msg_size(msg_get_wrapped(msg));
        }
-       kref_init(&l_ptr->ref);
-       l_ptr->addr = peer;
-       if_name = strchr(b_ptr->name, ':') + 1;
-       sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
-               tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
-               tipc_node(tn->own_addr),
-               if_name,
-               tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
-               /* note: peer i/f name is updated by reset/activate message */
-       memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
-       l_ptr->owner = n_ptr;
-       l_ptr->peer_session = INVALID_SESSION;
-       l_ptr->bearer_id = b_ptr->identity;
-       link_set_supervision_props(l_ptr, b_ptr->tolerance);
-       l_ptr->state = RESET_UNKNOWN;
-
-       l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
-       msg = l_ptr->pmsg;
-       tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
-                     l_ptr->addr);
-       msg_set_size(msg, sizeof(l_ptr->proto_msg));
-       msg_set_session(msg, (tn->random & 0xffff));
-       msg_set_bearer_id(msg, b_ptr->identity);
-       strcpy((char *)msg_data(msg), if_name);
-       l_ptr->net_plane = b_ptr->net_plane;
-       l_ptr->advertised_mtu = b_ptr->mtu;
-       l_ptr->mtu = l_ptr->advertised_mtu;
-       l_ptr->priority = b_ptr->priority;
-       tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-       l_ptr->snd_nxt = 1;
-       __skb_queue_head_init(&l_ptr->transmq);
-       __skb_queue_head_init(&l_ptr->backlogq);
-       __skb_queue_head_init(&l_ptr->deferdq);
-       skb_queue_head_init(&l_ptr->wakeupq);
-       skb_queue_head_init(&l_ptr->inputq);
-       skb_queue_head_init(&l_ptr->namedq);
-       link_reset_statistics(l_ptr);
-       tipc_node_attach_link(n_ptr, l_ptr);
-       setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
-       link_state_event(l_ptr, STARTING_EVT);
-
-       return l_ptr;
+       l->stats.msg_lengths_total += length;
+       l->stats.msg_length_counts++;
+       if (length <= 64)
+               l->stats.msg_length_profile[0]++;
+       else if (length <= 256)
+               l->stats.msg_length_profile[1]++;
+       else if (length <= 1024)
+               l->stats.msg_length_profile[2]++;
+       else if (length <= 4096)
+               l->stats.msg_length_profile[3]++;
+       else if (length <= 16384)
+               l->stats.msg_length_profile[4]++;
+       else if (length <= 32768)
+               l->stats.msg_length_profile[5]++;
+       else
+               l->stats.msg_length_profile[6]++;
 }
 
-/**
- * tipc_link_delete - Delete a link
- * @l: link to be deleted
+/* tipc_link_timeout - perform periodic task as instructed by the node timer
  */
-void tipc_link_delete(struct tipc_link *l)
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       tipc_link_reset(l);
-       if (del_timer(&l->timer))
-               tipc_link_put(l);
-       l->flags |= LINK_STOPPED;
-       /* Delete link now, or when timer is finished: */
-       tipc_link_reset_fragments(l);
-       tipc_node_detach_link(l->owner, l);
-       tipc_link_put(l);
-}
+       int rc = 0;
+       int mtyp = STATE_MSG;
+       bool xmit = false;
+       bool prb = false;
+
+       link_profile_stats(l);
+
+       switch (l->state) {
+       case LINK_ESTABLISHED:
+       case LINK_SYNCHING:
+               if (!l->silent_intv_cnt) {
+                       if (tipc_bclink_acks_missing(l->owner))
+                               xmit = true;
+               } else if (l->silent_intv_cnt <= l->abort_limit) {
+                       xmit = true;
+                       prb = true;
+               } else {
+                       rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               }
+               l->silent_intv_cnt++;
+               break;
+       case LINK_RESET:
+               xmit = true;
+               mtyp = RESET_MSG;
+               break;
+       case LINK_ESTABLISHING:
+               xmit = true;
+               mtyp = ACTIVATE_MSG;
+               break;
+       case LINK_PEER_RESET:
+       case LINK_RESETTING:
+       case LINK_FAILINGOVER:
+               break;
+       default:
+               break;
+       }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *link;
-       struct tipc_node *node;
+       if (xmit)
+               tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(node, &tn->node_list, list) {
-               tipc_node_lock(node);
-               link = node->links[bearer_id];
-               if (link)
-                       tipc_link_delete(link);
-               tipc_node_unlock(node);
-       }
-       rcu_read_unlock();
+       return rc;
 }
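
The timer path follows the same lock/IO split as the rest of the series: the FSM decides under the node lock, the protocol message is only built into @xmitq, and the bearer is touched afterwards. A sketch of the node-side timer, with the node helpers assumed from their use elsewhere in this file:

        struct sk_buff_head xmitq;
        int rc;

        __skb_queue_head_init(&xmitq);
        tipc_node_lock(n);
        rc = tipc_link_timeout(l, &xmitq);
        tipc_node_unlock(n);
        tipc_bearer_xmit(n->net, l->bearer_id, &xmitq, l->media_addr);
        if (rc & TIPC_LINK_DOWN_EVT)
                ;       /* node layer resets the link, outside the FSM */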
 
 /**
@@ -334,7 +490,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
  * @link: congested link
  * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
- * Only consumes message if there is an error
+ * Does not consume buffer list
  */
 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
@@ -347,8 +503,7 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
        /* This really cannot happen...  */
        if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
                pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-               tipc_link_reset(link);
-               goto err;
+               return -ENOBUFS;
        }
        /* Non-blocking sender: */
        if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
@@ -358,15 +513,12 @@ static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              addr, addr, oport, 0, 0);
        if (!skb)
-               goto err;
+               return -ENOBUFS;
        TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
        TIPC_SKB_CB(skb)->chain_imp = imp;
        skb_queue_tail(&link->wakeupq, skb);
        link->stats.link_congs++;
        return -ELINKCONG;
-err:
-       __skb_queue_purge(list);
-       return -ENOBUFS;
 }
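
With the error path gone, -ELINKCONG strictly means "chain parked, wait for wakeup": the caller still owns the list, and a SOCK_WAKEUP skb recording chain_sz/chain_imp travels through the link instead. The socket side is expected to react roughly like this sketch, where wait_for_wakeup() is a hypothetical stand-in for blocking until the wakeup message reaches the socket:

        int rc;

        do {
                rc = tipc_link_xmit(l, &list, &xmitq);
                if (rc == -ELINKCONG)
                        wait_for_wakeup();      /* hypothetical helper */
        } while (rc == -ELINKCONG);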
 
 /**
@@ -388,9 +540,7 @@ void link_prepare_wakeup(struct tipc_link *l)
                if ((pnd[imp] + l->backlog[imp].len) >= lim)
                        break;
                skb_unlink(skb, &l->wakeupq);
-               skb_queue_tail(&l->inputq, skb);
-               l->owner->inputq = &l->inputq;
-               l->owner->action_flags |= TIPC_MSG_EVT;
+               skb_queue_tail(l->inputq, skb);
        }
 }
 
@@ -426,208 +576,36 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
        tipc_link_reset_fragments(l_ptr);
 }
 
-void tipc_link_reset(struct tipc_link *l_ptr)
+void tipc_link_reset(struct tipc_link *l)
 {
-       u32 prev_state = l_ptr->state;
-       int was_active_link = tipc_link_is_active(l_ptr);
-       struct tipc_node *owner = l_ptr->owner;
-       struct tipc_link *pl = tipc_parallel_link(l_ptr);
-
-       msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
+       tipc_link_fsm_evt(l, LINK_RESET_EVT);
 
        /* Link is down, accept any session */
-       l_ptr->peer_session = INVALID_SESSION;
-
-       /* Prepare for renewed mtu size negotiation */
-       l_ptr->mtu = l_ptr->advertised_mtu;
-
-       l_ptr->state = RESET_UNKNOWN;
-
-       if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
-               return;
-
-       tipc_node_link_down(l_ptr->owner, l_ptr);
-       tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
-
-       if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
-               l_ptr->flags |= LINK_FAILINGOVER;
-               l_ptr->failover_checkpt = l_ptr->rcv_nxt;
-               pl->failover_pkts = FIRST_FAILOVER;
-               pl->failover_checkpt = l_ptr->rcv_nxt;
-               pl->failover_skb = l_ptr->reasm_buf;
-       } else {
-               kfree_skb(l_ptr->reasm_buf);
-       }
-       /* Clean up all queues, except inputq: */
-       __skb_queue_purge(&l_ptr->transmq);
-       __skb_queue_purge(&l_ptr->deferdq);
-       if (!owner->inputq)
-               owner->inputq = &l_ptr->inputq;
-       skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
-       if (!skb_queue_empty(owner->inputq))
-               owner->action_flags |= TIPC_MSG_EVT;
-       tipc_link_purge_backlog(l_ptr);
-       l_ptr->reasm_buf = NULL;
-       l_ptr->rcv_unacked = 0;
-       l_ptr->snd_nxt = 1;
-       l_ptr->silent_intv_cnt = 0;
-       l_ptr->stale_count = 0;
-       link_reset_statistics(l_ptr);
-}
-
-static void link_activate(struct tipc_link *link)
-{
-       struct tipc_node *node = link->owner;
-
-       link->rcv_nxt = 1;
-       link->stats.recv_info = 1;
-       link->silent_intv_cnt = 0;
-       tipc_node_link_up(node, link);
-       tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
-}
-
-/**
- * link_state_event - link finite state machine
- * @l_ptr: pointer to link
- * @event: state machine event to process
- */
-static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
-{
-       struct tipc_link *other;
-       unsigned long timer_intv = l_ptr->keepalive_intv;
+       l->peer_session = WILDCARD_SESSION;
 
-       if (l_ptr->flags & LINK_STOPPED)
-               return;
-
-       if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
-               return;         /* Not yet. */
-
-       if (l_ptr->flags & LINK_FAILINGOVER)
-               return;
+       /* If peer is up, it only accepts an incremented session number */
+       msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);
 
-       switch (l_ptr->state) {
-       case WORKING_WORKING:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       l_ptr->silent_intv_cnt = 0;
-                       break;
-               case SILENCE_EVT:
-                       if (!l_ptr->silent_intv_cnt) {
-                               if (tipc_bclink_acks_missing(l_ptr->owner))
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0);
-                               break;
-                       }
-                       l_ptr->state = WORKING_UNKNOWN;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       break;
-               case RESET_MSG:
-                       pr_debug("%s<%s>, requested by peer\n",
-                                link_rst_msg, l_ptr->name);
-                       tipc_link_reset(l_ptr);
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               default:
-                       pr_debug("%s%u in WW state\n", link_unk_evt, event);
-               }
-               break;
-       case WORKING_UNKNOWN:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       l_ptr->state = WORKING_WORKING;
-                       l_ptr->silent_intv_cnt = 0;
-                       break;
-               case RESET_MSG:
-                       pr_debug("%s<%s>, requested by peer while probing\n",
-                                link_rst_msg, l_ptr->name);
-                       tipc_link_reset(l_ptr);
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               case SILENCE_EVT:
-                       if (!l_ptr->silent_intv_cnt) {
-                               l_ptr->state = WORKING_WORKING;
-                               if (tipc_bclink_acks_missing(l_ptr->owner))
-                                       tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                            0, 0, 0, 0);
-                       } else if (l_ptr->silent_intv_cnt <
-                                  l_ptr->abort_limit) {
-                               tipc_link_proto_xmit(l_ptr, STATE_MSG,
-                                                    1, 0, 0, 0);
-                       } else {        /* Link has failed */
-                               pr_debug("%s<%s>, peer not responding\n",
-                                        link_rst_msg, l_ptr->name);
-                               tipc_link_reset(l_ptr);
-                               l_ptr->state = RESET_UNKNOWN;
-                               tipc_link_proto_xmit(l_ptr, RESET_MSG,
-                                                    0, 0, 0, 0);
-                       }
-                       break;
-               default:
-                       pr_err("%s%u in WU state\n", link_unk_evt, event);
-               }
-               break;
-       case RESET_UNKNOWN:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-                       break;
-               case ACTIVATE_MSG:
-                       other = l_ptr->owner->active_links[0];
-                       if (other && link_working_unknown(other))
-                               break;
-                       l_ptr->state = WORKING_WORKING;
-                       link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       if (l_ptr->owner->working_links == 1)
-                               tipc_link_sync_xmit(l_ptr);
-                       break;
-               case RESET_MSG:
-                       l_ptr->state = RESET_RESET;
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            1, 0, 0, 0);
-                       break;
-               case STARTING_EVT:
-                       l_ptr->flags |= LINK_STARTED;
-                       link_set_timer(l_ptr, timer_intv);
-                       break;
-               case SILENCE_EVT:
-                       tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-                       break;
-               default:
-                       pr_err("%s%u in RU state\n", link_unk_evt, event);
-               }
-               break;
-       case RESET_RESET:
-               switch (event) {
-               case TRAFFIC_MSG_EVT:
-               case ACTIVATE_MSG:
-                       other = l_ptr->owner->active_links[0];
-                       if (other && link_working_unknown(other))
-                               break;
-                       l_ptr->state = WORKING_WORKING;
-                       link_activate(l_ptr);
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-                       if (l_ptr->owner->working_links == 1)
-                               tipc_link_sync_xmit(l_ptr);
-                       break;
-               case RESET_MSG:
-                       break;
-               case SILENCE_EVT:
-                       tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-                                            0, 0, 0, 0);
-                       break;
-               default:
-                       pr_err("%s%u in RR state\n", link_unk_evt, event);
-               }
-               break;
-       default:
-               pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
-       }
+       /* Prepare for renewed mtu size negotiation */
+       l->mtu = l->advertised_mtu;
+
+       /* Clean up all queues: */
+       __skb_queue_purge(&l->transmq);
+       __skb_queue_purge(&l->deferdq);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+
+       tipc_link_purge_backlog(l);
+       kfree_skb(l->reasm_buf);
+       kfree_skb(l->failover_reasm_skb);
+       l->reasm_buf = NULL;
+       l->failover_reasm_skb = NULL;
+       l->rcv_unacked = 0;
+       l->snd_nxt = 1;
+       l->rcv_nxt = 1;
+       l->silent_intv_cnt = 0;
+       l->stats.recv_info = 0;
+       l->stale_count = 0;
+       link_reset_statistics(l);
 }
 
 /**
@@ -635,8 +613,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain, except when returning an error code.
  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
@@ -650,7 +627,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
        u16 ack = mod(link->rcv_nxt - 1);
        u16 seqno = link->snd_nxt;
        u16 bc_last_in = link->owner->bclink.last_in;
-       struct tipc_media_addr *addr = &link->media_addr;
+       struct tipc_media_addr *addr = link->media_addr;
        struct sk_buff_head *transmq = &link->transmq;
        struct sk_buff_head *backlogq = &link->backlogq;
        struct sk_buff *skb, *bskb;
@@ -660,10 +637,9 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
                        return link_schedule_user(link, list);
        }
-       if (unlikely(msg_size(msg) > mtu)) {
-               __skb_queue_purge(list);
+       if (unlikely(msg_size(msg) > mtu))
                return -EMSGSIZE;
-       }
+
        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
@@ -700,101 +676,76 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
        return 0;
 }
 
-static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
-{
-       skb_queue_head_init(list);
-       __skb_queue_tail(list, skb);
-}
-
-static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
-{
-       struct sk_buff_head head;
-
-       skb2list(skb, &head);
-       return __tipc_link_xmit(link->owner->net, link, &head);
-}
-
-/* tipc_link_xmit_skb(): send single buffer to destination
- * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
- * messages, which will not be rejected
- * The only exception is datagram messages rerouted after secondary
- * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
- */
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
-                      u32 selector)
-{
-       struct sk_buff_head head;
-       int rc;
-
-       skb2list(skb, &head);
-       rc = tipc_link_xmit(net, &head, dnode, selector);
-       if (rc == -ELINKCONG)
-               kfree_skb(skb);
-       return 0;
-}
-
 /**
- * tipc_link_xmit() is the general link level function for message sending
- * @net: the applicable net namespace
+ * tipc_link_xmit(): enqueue buffer list according to queue situation
+ * @link: link to use
  * @list: chain of buffers containing message
- * @dsz: amount of user data to be sent
- * @dnode: address of destination node
- * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * @xmitq: returned list of packets to be sent by caller
+ *
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
-                  u32 selector)
+int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
+                  struct sk_buff_head *xmitq)
 {
-       struct tipc_link *link = NULL;
-       struct tipc_node *node;
-       int rc = -EHOSTUNREACH;
+       struct tipc_msg *hdr = buf_msg(skb_peek(list));
+       unsigned int maxwin = l->window;
+       unsigned int i, imp = msg_importance(hdr);
+       unsigned int mtu = l->mtu;
+       u16 ack = l->rcv_nxt - 1;
+       u16 seqno = l->snd_nxt;
+       u16 bc_last_in = l->owner->bclink.last_in;
+       struct sk_buff_head *transmq = &l->transmq;
+       struct sk_buff_head *backlogq = &l->backlogq;
+       struct sk_buff *skb, *_skb, *bskb;
 
-       node = tipc_node_find(net, dnode);
-       if (node) {
-               tipc_node_lock(node);
-               link = node->active_links[selector & 1];
-               if (link)
-                       rc = __tipc_link_xmit(net, link, list);
-               tipc_node_unlock(node);
-               tipc_node_put(node);
-       }
-       if (link)
-               return rc;
-
-       if (likely(in_own_node(net, dnode))) {
-               tipc_sk_rcv(net, list);
-               return 0;
+       /* Match msg importance against this and all higher backlog limits: */
+       for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+               if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+                       return link_schedule_user(l, list);
        }
+       if (unlikely(msg_size(hdr) > mtu))
+               return -EMSGSIZE;
 
-       __skb_queue_purge(list);
-       return rc;
-}
-
-/*
- * tipc_link_sync_xmit - synchronize broadcast link endpoints.
- *
- * Give a newly added peer node the sequence number where it should
- * start receiving and acking broadcast packets.
- *
- * Called with node locked
- */
-static void tipc_link_sync_xmit(struct tipc_link *link)
-{
-       struct sk_buff *skb;
-       struct tipc_msg *msg;
-
-       skb = tipc_buf_acquire(INT_H_SIZE);
-       if (!skb)
-               return;
+       /* Prepare each packet for sending, and add to relevant queue: */
+       while (skb_queue_len(list)) {
+               skb = skb_peek(list);
+               hdr = buf_msg(skb);
+               msg_set_seqno(hdr, seqno);
+               msg_set_ack(hdr, ack);
+               msg_set_bcast_ack(hdr, bc_last_in);
 
-       msg = buf_msg(skb);
-       tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
-                     INT_H_SIZE, link->addr);
-       msg_set_last_bcast(msg, link->owner->bclink.acked);
-       __tipc_link_xmit_skb(link, skb);
+               if (likely(skb_queue_len(transmq) < maxwin)) {
+                       _skb = skb_clone(skb, GFP_ATOMIC);
+                       if (!_skb)
+                               return -ENOBUFS;
+                       __skb_dequeue(list);
+                       __skb_queue_tail(transmq, skb);
+                       __skb_queue_tail(xmitq, _skb);
+                       l->rcv_unacked = 0;
+                       seqno++;
+                       continue;
+               }
+               if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+                       kfree_skb(__skb_dequeue(list));
+                       l->stats.sent_bundled++;
+                       continue;
+               }
+               if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+                       kfree_skb(__skb_dequeue(list));
+                       __skb_queue_tail(backlogq, bskb);
+                       l->backlog[msg_importance(buf_msg(bskb))].len++;
+                       l->stats.sent_bundled++;
+                       l->stats.sent_bundles++;
+                       continue;
+               }
+               l->backlog[imp].len += skb_queue_len(list);
+               skb_queue_splice_tail_init(list, backlogq);
+       }
+       l->snd_nxt = seqno;
+       return 0;
 }
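
The decisive change is that tipc_link_xmit() no longer touches the bearer: it only moves buffers between transmq, backlogq and the bundling path, handing wire-ready clones back in @xmitq. That keeps bearer I/O outside the node lock, along the lines of this sketch (n, l and the prepared message chain 'list' assumed in scope):

        struct sk_buff_head xmitq;
        int rc;

        __skb_queue_head_init(&xmitq);
        tipc_node_lock(n);
        rc = tipc_link_xmit(l, &list, &xmitq);  /* queue moves only */
        tipc_node_unlock(n);
        tipc_bearer_xmit(n->net, l->bearer_id, &xmitq, l->media_addr);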
 
 /*
@@ -842,29 +793,37 @@ void tipc_link_push_packets(struct tipc_link *link)
                link->rcv_unacked = 0;
                __skb_queue_tail(&link->transmq, skb);
                tipc_bearer_send(link->owner->net, link->bearer_id,
-                                skb, &link->media_addr);
+                                skb, link->media_addr);
        }
        link->snd_nxt = seqno;
 }
 
-void tipc_link_reset_all(struct tipc_node *node)
+void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
 {
-       char addr_string[16];
-       u32 i;
+       struct sk_buff *skb, *_skb;
+       struct tipc_msg *hdr;
+       u16 seqno = l->snd_nxt;
+       u16 ack = l->rcv_nxt - 1;
 
-       tipc_node_lock(node);
-
-       pr_warn("Resetting all links to %s\n",
-               tipc_addr_string_fill(addr_string, node->addr));
-
-       for (i = 0; i < MAX_BEARERS; i++) {
-               if (node->links[i]) {
-                       link_print(node->links[i], "Resetting link\n");
-                       tipc_link_reset(node->links[i]);
-               }
+       while (skb_queue_len(&l->transmq) < l->window) {
+               skb = skb_peek(&l->backlogq);
+               if (!skb)
+                       break;
+               _skb = skb_clone(skb, GFP_ATOMIC);
+               if (!_skb)
+                       break;
+               __skb_dequeue(&l->backlogq);
+               hdr = buf_msg(skb);
+               l->backlog[msg_importance(hdr)].len--;
+               __skb_queue_tail(&l->transmq, skb);
+               __skb_queue_tail(xmitq, _skb);
+               msg_set_ack(hdr, ack);
+               msg_set_seqno(hdr, seqno);
+               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               l->rcv_unacked = 0;
+               seqno++;
        }
-
-       tipc_node_unlock(node);
+       l->snd_nxt = seqno;
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -877,9 +836,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
 
        if (l_ptr->addr) {
                /* Handle failure on standard link */
-               link_print(l_ptr, "Resetting link\n");
-               tipc_link_reset(l_ptr);
-
+               link_print(l_ptr, "Resetting link ");
+               pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+                       msg_user(msg), msg_type(msg), msg_size(msg),
+                       msg_errcode(msg));
+               pr_info("sqno %u, prev: %x, src: %x\n",
+                       msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
        } else {
                /* Handle failure on broadcast link */
                struct tipc_node *n_ptr;
@@ -934,191 +896,45 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
-                                &l_ptr->media_addr);
+                                l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
 }
 
-/* link_synch(): check if all packets arrived before the synch
- *               point have been consumed
- * Returns true if the parallel links are synched, otherwise false
- */
-static bool link_synch(struct tipc_link *l)
-{
-       unsigned int post_synch;
-       struct tipc_link *pl;
-
-       pl  = tipc_parallel_link(l);
-       if (pl == l)
-               goto synched;
-
-       /* Was last pre-synch packet added to input queue ? */
-       if (less_eq(pl->rcv_nxt, l->synch_point))
-               return false;
-
-       /* Is it still in the input queue ? */
-       post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
-       if (skb_queue_len(&pl->inputq) > post_synch)
-               return false;
-synched:
-       l->flags &= ~LINK_SYNCHING;
-       return true;
-}
-
-static void link_retrieve_defq(struct tipc_link *link,
-                              struct sk_buff_head *list)
+static int tipc_link_retransm(struct tipc_link *l, int retransm,
+                             struct sk_buff_head *xmitq)
 {
-       u16 seq_no;
-
-       if (skb_queue_empty(&link->deferdq))
-               return;
-
-       seq_no = buf_seqno(skb_peek(&link->deferdq));
-       if (seq_no == link->rcv_nxt)
-               skb_queue_splice_tail_init(&link->deferdq, list);
-}
-
-/**
- * tipc_rcv - process TIPC packets/messages arriving from off-node
- * @net: the applicable net namespace
- * @skb: TIPC packet
- * @b_ptr: pointer to bearer message arrived on
- *
- * Invoked with no locks held.  Bearer pointer must point to a valid bearer
- * structure (i.e. cannot be NULL), but bearer can be inactive.
- */
-void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct sk_buff_head head;
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *skb1, *tmp;
-       struct tipc_msg *msg;
-       u16 seq_no;
-       u16 ackd;
-       u32 released;
+       struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
+       struct tipc_msg *hdr;
 
-       skb2list(skb, &head);
-
-       while ((skb = __skb_dequeue(&head))) {
-               /* Ensure message is well-formed */
-               if (unlikely(!tipc_msg_validate(skb)))
-                       goto discard;
-
-               /* Handle arrival of a non-unicast link message */
-               msg = buf_msg(skb);
-               if (unlikely(msg_non_seq(msg))) {
-                       if (msg_user(msg) ==  LINK_CONFIG)
-                               tipc_disc_rcv(net, skb, b_ptr);
-                       else
-                               tipc_bclink_rcv(net, skb);
-                       continue;
-               }
-
-               /* Discard unicast link messages destined for another node */
-               if (unlikely(!msg_short(msg) &&
-                            (msg_destnode(msg) != tn->own_addr)))
-                       goto discard;
-
-               /* Locate neighboring node that sent message */
-               n_ptr = tipc_node_find(net, msg_prevnode(msg));
-               if (unlikely(!n_ptr))
-                       goto discard;
-
-               tipc_node_lock(n_ptr);
-               /* Locate unicast link endpoint that should handle message */
-               l_ptr = n_ptr->links[b_ptr->identity];
-               if (unlikely(!l_ptr))
-                       goto unlock;
-
-               /* Verify that communication with node is currently allowed */
-               if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
-                   msg_user(msg) == LINK_PROTOCOL &&
-                   (msg_type(msg) == RESET_MSG ||
-                   msg_type(msg) == ACTIVATE_MSG) &&
-                   !msg_redundant_link(msg))
-                       n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
-
-               if (tipc_node_blocked(n_ptr))
-                       goto unlock;
-
-               /* Validate message sequence number info */
-               seq_no = msg_seqno(msg);
-               ackd = msg_ack(msg);
-
-               /* Release acked messages */
-               if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
-                       tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
-
-               released = 0;
-               skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
-                       if (more(buf_seqno(skb1), ackd))
-                               break;
-                        __skb_unlink(skb1, &l_ptr->transmq);
-                        kfree_skb(skb1);
-                        released = 1;
-               }
-
-               /* Try sending any messages link endpoint has pending */
-               if (unlikely(skb_queue_len(&l_ptr->backlogq)))
-                       tipc_link_push_packets(l_ptr);
-
-               if (released && !skb_queue_empty(&l_ptr->wakeupq))
-                       link_prepare_wakeup(l_ptr);
-
-               /* Process the incoming packet */
-               if (unlikely(!link_working_working(l_ptr))) {
-                       if (msg_user(msg) == LINK_PROTOCOL) {
-                               tipc_link_proto_rcv(l_ptr, skb);
-                               link_retrieve_defq(l_ptr, &head);
-                               skb = NULL;
-                               goto unlock;
-                       }
-
-                       /* Traffic message. Conditionally activate link */
-                       link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-
-                       if (link_working_working(l_ptr)) {
-                               /* Re-insert buffer in front of queue */
-                               __skb_queue_head(&head, skb);
-                               skb = NULL;
-                               goto unlock;
-                       }
-                       goto unlock;
-               }
-
-               /* Link is now in state WORKING_WORKING */
-               if (unlikely(seq_no != l_ptr->rcv_nxt)) {
-                       link_handle_out_of_seq_msg(l_ptr, skb);
-                       link_retrieve_defq(l_ptr, &head);
-                       skb = NULL;
-                       goto unlock;
-               }
-               l_ptr->silent_intv_cnt = 0;
+       if (!skb)
+               return 0;
 
-               /* Synchronize with parallel link if applicable */
-               if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
-                       if (!link_synch(l_ptr))
-                               goto unlock;
-               }
-               l_ptr->rcv_nxt++;
-               if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
-                       link_retrieve_defq(l_ptr, &head);
-               if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
-                       l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
-               }
-               tipc_link_input(l_ptr, skb);
-               skb = NULL;
-unlock:
-               tipc_node_unlock(n_ptr);
-               tipc_node_put(n_ptr);
-discard:
-               if (unlikely(skb))
-                       kfree_skb(skb);
+       /* Detect repeated retransmit failures on same packet */
+       if (likely(l->last_retransm != buf_seqno(skb))) {
+               l->last_retransm = buf_seqno(skb);
+               l->stale_count = 1;
+       } else if (++l->stale_count > 100) {
+               link_retransmit_failure(l, skb);
+               return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+       }
+       skb_queue_walk(&l->transmq, skb) {
+               if (!retransm)
+                       return 0;
+               hdr = buf_msg(skb);
+               _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+               if (!_skb)
+                       return 0;
+               hdr = buf_msg(_skb);
+               msg_set_ack(hdr, l->rcv_nxt - 1);
+               msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+               _skb->priority = TC_PRIO_CONTROL;
+               __skb_queue_tail(xmitq, _skb);
+               retransm--;
+               l->stats.retransmitted++;
        }
+       return 0;
 }
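
The stale-count guard above converts repeated loss of the same head-of-queue packet into a link failure: the counter restarts whenever a different seqno reaches the head of transmq, and trips only after 100 consecutive retransmits of one packet. A minimal user-space sketch of that pattern (struct and field names are illustrative; the threshold of 100 is the one used above):

#include <stdbool.h>
#include <stdint.h>

struct retransm_guard {
	uint16_t last_retransm;	/* seqno of the packet retransmitted last */
	uint32_t stale_count;	/* consecutive retransmits of that seqno */
};

/* Returns true once the same packet has been retransmitted more than
 * 100 times in a row, i.e. the point where the link declares failure.
 */
static bool retransmit_is_stale(struct retransm_guard *g, uint16_t head_seqno)
{
	if (g->last_retransm != head_seqno) {
		g->last_retransm = head_seqno;
		g->stale_count = 1;
		return false;
	}
	return ++g->stale_count > 100;
}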
 
 /* tipc_data_input - deliver data and name distr msgs to upper layer
@@ -1126,29 +942,22 @@ discard:
  * Consumes buffer if message is of right type
  * Node lock must be held
  */
-static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
+static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
+                           struct sk_buff_head *inputq)
 {
        struct tipc_node *node = link->owner;
-       struct tipc_msg *msg = buf_msg(skb);
-       u32 dport = msg_destport(msg);
 
-       switch (msg_user(msg)) {
+       switch (msg_user(buf_msg(skb))) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
        case CONN_MANAGER:
-               if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
-                       node->inputq = &link->inputq;
-                       node->action_flags |= TIPC_MSG_EVT;
-               }
+               __skb_queue_tail(inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                node->bclink.recv_permitted = true;
-               node->namedq = &link->namedq;
-               skb_queue_tail(&link->namedq, skb);
-               if (skb_queue_len(&link->namedq) == 1)
-                       node->action_flags |= TIPC_NAMED_MSG_EVT;
+               skb_queue_tail(link->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
@@ -1165,54 +974,160 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 /* tipc_link_input - process packet that has passed link protocol check
  *
  * Consumes buffer
- * Node lock must be held
  */
-static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
+                          struct sk_buff_head *inputq)
 {
-       struct tipc_node *node = link->owner;
-       struct tipc_msg *msg = buf_msg(skb);
+       struct tipc_node *node = l->owner;
+       struct tipc_msg *hdr = buf_msg(skb);
+       struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
+       int usr = msg_user(hdr);
+       int rc = 0;
        int pos = 0;
+       int ipos = 0;
 
-       if (likely(tipc_data_input(link, skb)))
-               return;
+       if (unlikely(usr == TUNNEL_PROTOCOL)) {
+               if (msg_type(hdr) == SYNCH_MSG) {
+                       __skb_queue_purge(&l->deferdq);
+                       goto drop;
+               }
+               if (!tipc_msg_extract(skb, &iskb, &ipos))
+                       return rc;
+               kfree_skb(skb);
+               skb = iskb;
+               hdr = buf_msg(skb);
+               if (less(msg_seqno(hdr), l->drop_point))
+                       goto drop;
+               if (tipc_data_input(l, skb, inputq))
+                       return rc;
+               usr = msg_user(hdr);
+               reasm_skb = &l->failover_reasm_skb;
+       }
 
-       switch (msg_user(msg)) {
-       case TUNNEL_PROTOCOL:
-               if (msg_dup(msg)) {
-                       link->flags |= LINK_SYNCHING;
-                       link->synch_point = msg_seqno(msg_get_wrapped(msg));
-                       kfree_skb(skb);
-                       break;
+       if (usr == MSG_BUNDLER) {
+               l->stats.recv_bundles++;
+               l->stats.recv_bundled += msg_msgcnt(hdr);
+               while (tipc_msg_extract(skb, &iskb, &pos))
+                       tipc_data_input(l, iskb, inputq);
+               return 0;
+       } else if (usr == MSG_FRAGMENTER) {
+               l->stats.recv_fragments++;
+               if (tipc_buf_append(reasm_skb, &skb)) {
+                       l->stats.recv_fragmented++;
+                       tipc_data_input(l, skb, inputq);
+               } else if (!*reasm_skb) {
+                       return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
-               if (!tipc_link_failover_rcv(link, &skb))
-                       break;
-               if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
-                       tipc_data_input(link, skb);
+               return 0;
+       } else if (usr == BCAST_PROTOCOL) {
+               tipc_link_sync_rcv(node, skb);
+               return 0;
+       }
+drop:
+       kfree_skb(skb);
+       return 0;
+}
+
+static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
+{
+       bool released = false;
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&l->transmq, skb, tmp) {
+               if (more(buf_seqno(skb), acked))
                        break;
+               __skb_unlink(skb, &l->transmq);
+               kfree_skb(skb);
+               released = true;
+       }
+       return released;
+}
+
+/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
+ * @l: the link that should handle the message
+ * @skb: TIPC packet
+ * @xmitq: queue to place packets to be sent after this call
+ */
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+                 struct sk_buff_head *xmitq)
+{
+       struct sk_buff_head *arrvq = &l->deferdq;
+       struct sk_buff_head tmpq;
+       struct tipc_msg *hdr;
+       u16 seqno, rcv_nxt;
+       int rc = 0;
+
+       __skb_queue_head_init(&tmpq);
+
+       if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
+               if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
+                       tipc_link_build_proto_msg(l, STATE_MSG, 0,
+                                                 0, 0, 0, xmitq);
+               return rc;
+       }
+
+       while ((skb = skb_peek(arrvq))) {
+               hdr = buf_msg(skb);
+
+               /* Verify and update link state */
+               if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
+                       __skb_dequeue(arrvq);
+                       rc = tipc_link_proto_rcv(l, skb, xmitq);
+                       continue;
                }
-       case MSG_BUNDLER:
-               link->stats.recv_bundles++;
-               link->stats.recv_bundled += msg_msgcnt(msg);
 
-               while (tipc_msg_extract(skb, &iskb, &pos))
-                       tipc_data_input(link, iskb);
-               break;
-       case MSG_FRAGMENTER:
-               link->stats.recv_fragments++;
-               if (tipc_buf_append(&link->reasm_buf, &skb)) {
-                       link->stats.recv_fragmented++;
-                       tipc_data_input(link, skb);
-               } else if (!link->reasm_buf) {
-                       tipc_link_reset(link);
+               if (unlikely(!link_is_up(l))) {
+                       rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+                       if (!link_is_up(l)) {
+                               kfree_skb(__skb_dequeue(arrvq));
+                               goto exit;
+                       }
                }
-               break;
-       case BCAST_PROTOCOL:
-               tipc_link_sync_rcv(node, skb);
-               break;
-       default:
-               break;
-       };
+
+               l->silent_intv_cnt = 0;
+
+               /* Forward queues and wake up waiting users */
+               if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
+                       tipc_link_advance_backlog(l, xmitq);
+                       if (unlikely(!skb_queue_empty(&l->wakeupq)))
+                               link_prepare_wakeup(l);
+               }
+
+               /* Defer reception if there is a gap in the sequence */
+               seqno = msg_seqno(hdr);
+               rcv_nxt = l->rcv_nxt;
+               if (unlikely(less(rcv_nxt, seqno))) {
+                       l->stats.deferred_recv++;
+                       goto exit;
+               }
+
+               __skb_dequeue(arrvq);
+
+               /* Drop if packet already received */
+               if (unlikely(more(rcv_nxt, seqno))) {
+                       l->stats.duplicates++;
+                       kfree_skb(skb);
+                       goto exit;
+               }
+
+               /* Packet can be delivered */
+               l->rcv_nxt++;
+               l->stats.recv_info++;
+               if (unlikely(!tipc_data_input(l, skb, &tmpq)))
+                       rc = tipc_link_input(l, skb, &tmpq);
+
+               /* Ack at regular intervals */
+               if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
+                       l->rcv_unacked = 0;
+                       l->stats.sent_acks++;
+                       tipc_link_build_proto_msg(l, STATE_MSG,
+                                                 0, 0, 0, 0, xmitq);
+               }
+       }
+exit:
+       tipc_skb_queue_splice_tail(&tmpq, l->inputq);
+       return rc;
 }
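
tipc_link_rcv() only queues outgoing traffic on @xmitq, so the caller must both drain that queue to the bearer and act on the returned event bits. A hedged sketch of the calling pattern (the node layer is the real caller; locking is omitted, and the reset action on TIPC_LINK_DOWN_EVT is illustrative):

/* Feed one arriving packet to the link, then transmit whatever
 * protocol or retransmitted packets the call placed on xmitq.
 * Send-then-free mirrors the tipc_link_proto_xmit() pattern above.
 */
static void demo_link_rcv(struct net *net, struct tipc_link *l,
			  struct sk_buff *skb)
{
	struct sk_buff_head xmitq;
	struct sk_buff *_skb;
	int rc;

	__skb_queue_head_init(&xmitq);
	rc = tipc_link_rcv(l, skb, &xmitq);

	while ((_skb = __skb_dequeue(&xmitq))) {
		tipc_bearer_send(net, l->bearer_id, _skb, l->media_addr);
		kfree_skb(_skb);
	}
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_link_reset(l);	/* illustrative down-event action */
}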
 
 /**
@@ -1255,458 +1170,249 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 }
 
 /*
- * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ * Send protocol message to the other endpoint.
  */
-static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
-                                      struct sk_buff *buf)
+void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
+                         u32 gap, u32 tolerance, u32 priority)
 {
-       u32 seq_no = buf_seqno(buf);
+       struct sk_buff *skb = NULL;
+       struct sk_buff_head xmitq;
 
-       if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
-               tipc_link_proto_rcv(l_ptr, buf);
+       __skb_queue_head_init(&xmitq);
+       tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
+                                 tolerance, priority, &xmitq);
+       skb = __skb_dequeue(&xmitq);
+       if (!skb)
                return;
-       }
-
-       /* Record OOS packet arrival */
-       l_ptr->silent_intv_cnt = 0;
+       tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
+       l->rcv_unacked = 0;
+       kfree_skb(skb);
+}
 
-       /*
-        * Discard packet if a duplicate; otherwise add it to deferred queue
-        * and notify peer of gap as per protocol specification
-        */
-       if (less(seq_no, l_ptr->rcv_nxt)) {
-               l_ptr->stats.duplicates++;
-               kfree_skb(buf);
+/* tipc_link_build_proto_msg: prepare link protocol message for transmission
+ */
+static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
+                                     u16 rcvgap, int tolerance, int priority,
+                                     struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb = NULL;
+       struct tipc_msg *hdr = l->pmsg;
+       u16 snd_nxt = l->snd_nxt;
+       u16 rcv_nxt = l->rcv_nxt;
+       u16 rcv_last = rcv_nxt - 1;
+       int node_up = l->owner->bclink.recv_permitted;
+
+       /* Don't send protocol message during reset or link failover */
+       if (tipc_link_is_blocked(l))
                return;
-       }
 
-       if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
-               l_ptr->stats.deferred_recv++;
-               if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
+       msg_set_type(hdr, mtyp);
+       msg_set_net_plane(hdr, l->net_plane);
+       msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
+       msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
+       msg_set_link_tolerance(hdr, tolerance);
+       msg_set_linkprio(hdr, priority);
+       msg_set_redundant_link(hdr, node_up);
+       msg_set_seq_gap(hdr, 0);
+
+       /* Compatibility: created msg must not be in sequence with pkt flow */
+       msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
+
+       if (mtyp == STATE_MSG) {
+               if (!tipc_link_is_up(l))
+                       return;
+               msg_set_next_sent(hdr, snd_nxt);
+
+               /* Override rcvgap if there are packets in deferred queue */
+               if (!skb_queue_empty(&l->deferdq))
+                       rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
+               if (rcvgap) {
+                       msg_set_seq_gap(hdr, rcvgap);
+                       l->stats.sent_nacks++;
+               }
+               msg_set_ack(hdr, rcv_last);
+               msg_set_probe(hdr, probe);
+               if (probe)
+                       l->stats.sent_probes++;
+               l->stats.sent_states++;
        } else {
-               l_ptr->stats.duplicates++;
+               /* RESET_MSG or ACTIVATE_MSG */
+               msg_set_max_pkt(hdr, l->advertised_mtu);
+               msg_set_ack(hdr, l->rcv_nxt - 1);
+               msg_set_next_sent(hdr, 1);
        }
+       skb = tipc_buf_acquire(msg_size(hdr));
+       if (!skb)
+               return;
+       skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
+       skb->priority = TC_PRIO_CONTROL;
+       __skb_queue_tail(xmitq, skb);
 }
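
The "not in sequence" requirement above is met by offsetting the protocol message's seqno by half the 16-bit space: under wraparound-safe comparison it then sits maximally far from anything the data flow could currently use. A small user-space illustration (seq_less() is equivalent in spirit to TIPC's less() helper):

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a comes before b" on 16-bit sequence numbers. */
static int seq_less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	uint16_t snd_nxt = 7;
	uint16_t proto_seqno = snd_nxt + UINT16_MAX / 2;	/* 32774 */

	/* The protocol msg sorts after the whole current send window,
	 * so it can never be mistaken for an in-sequence data packet.
	 */
	printf("%d\n", seq_less(snd_nxt, proto_seqno));		/* 1 */
	printf("%d\n", seq_less(proto_seqno, snd_nxt + 64));	/* 0 */
	return 0;
}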
 
-/*
- * Send protocol message to the other endpoint.
+/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
+ * with contents of the link's transmit and backlog queues.
  */
-void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
-                         u32 gap, u32 tolerance, u32 priority)
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+                          int mtyp, struct sk_buff_head *xmitq)
 {
-       struct sk_buff *buf = NULL;
-       struct tipc_msg *msg = l_ptr->pmsg;
-       u32 msg_size = sizeof(l_ptr->proto_msg);
-       int r_flag;
-       u16 last_rcv;
-
-       /* Don't send protocol message during link failover */
-       if (l_ptr->flags & LINK_FAILINGOVER)
-               return;
+       struct sk_buff *skb, *tnlskb;
+       struct tipc_msg *hdr, tnlhdr;
+       struct sk_buff_head *queue = &l->transmq;
+       struct sk_buff_head tmpxq, tnlq;
+       u16 pktlen, pktcnt, seqno = l->snd_nxt;
 
-       /* Abort non-RESET send if communication with node is prohibited */
-       if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
+       if (!tnl)
                return;
 
-       /* Create protocol message with "out-of-sequence" sequence number */
-       msg_set_type(msg, msg_typ);
-       msg_set_net_plane(msg, l_ptr->net_plane);
-       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
-
-       if (msg_typ == STATE_MSG) {
-               u16 next_sent = l_ptr->snd_nxt;
+       skb_queue_head_init(&tnlq);
+       skb_queue_head_init(&tmpxq);
 
-               if (!tipc_link_is_up(l_ptr))
+       /* At least one packet required for safe algorithm => add dummy */
+       skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                             BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
+                             0, 0, TIPC_ERR_NO_PORT);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+       skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, &tmpxq);
+       __skb_queue_purge(&tmpxq);
+
+       /* Initialize reusable tunnel packet header */
+       tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
+                     mtyp, INT_H_SIZE, l->addr);
+       pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
+       msg_set_msgcnt(&tnlhdr, pktcnt);
+       msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
+tnl:
+       /* Wrap each packet into a tunnel packet */
+       skb_queue_walk(queue, skb) {
+               hdr = buf_msg(skb);
+               if (queue == &l->backlogq)
+                       msg_set_seqno(hdr, seqno++);
+               pktlen = msg_size(hdr);
+               msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
+               tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+               if (!tnlskb) {
+                       pr_warn("%sunable to send packet\n", link_co_err);
                        return;
-               msg_set_next_sent(msg, next_sent);
-               if (!skb_queue_empty(&l_ptr->deferdq)) {
-                       last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
-                       gap = mod(last_rcv - l_ptr->rcv_nxt);
                }
-               msg_set_seq_gap(msg, gap);
-               if (gap)
-                       l_ptr->stats.sent_nacks++;
-               msg_set_link_tolerance(msg, tolerance);
-               msg_set_linkprio(msg, priority);
-               msg_set_max_pkt(msg, l_ptr->mtu);
-               msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
-               msg_set_probe(msg, probe_msg != 0);
-               if (probe_msg)
-                       l_ptr->stats.sent_probes++;
-               l_ptr->stats.sent_states++;
-       } else {                /* RESET_MSG or ACTIVATE_MSG */
-               msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
-               msg_set_seq_gap(msg, 0);
-               msg_set_next_sent(msg, 1);
-               msg_set_probe(msg, 0);
-               msg_set_link_tolerance(msg, l_ptr->tolerance);
-               msg_set_linkprio(msg, l_ptr->priority);
-               msg_set_max_pkt(msg, l_ptr->advertised_mtu);
+               skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
+               skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
+               __skb_queue_tail(&tnlq, tnlskb);
+       }
+       if (queue != &l->backlogq) {
+               queue = &l->backlogq;
+               goto tnl;
        }
 
-       r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
-       msg_set_redundant_link(msg, r_flag);
-       msg_set_linkprio(msg, l_ptr->priority);
-       msg_set_size(msg, msg_size);
-
-       msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
-
-       buf = tipc_buf_acquire(msg_size);
-       if (!buf)
-               return;
+       tipc_link_xmit(tnl, &tnlq, xmitq);
 
-       skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
-       buf->priority = TC_PRIO_CONTROL;
-       tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
-                        &l_ptr->media_addr);
-       l_ptr->rcv_unacked = 0;
-       kfree_skb(buf);
+       if (mtyp == FAILOVER_MSG) {
+               tnl->drop_point = l->rcv_nxt;
+               tnl->failover_reasm_skb = l->reasm_buf;
+               l->reasm_buf = NULL;
+       }
 }
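
A hedged sketch of how a failover might drive the function above: the failing link's queues are wrapped for transmission over the tunnel link, and the FSM is notified that failover has begun (the exact call ordering belongs to the node layer and is illustrative here; the caller still transmits xmitq as usual):

/* For FAILOVER_MSG, tipc_link_tnl_prepare() also moves the failing
 * link's reassembly state and sets tnl->drop_point, as shown above.
 */
static void demo_failover(struct tipc_link *failing, struct tipc_link *tnl,
			  struct sk_buff_head *xmitq)
{
	tipc_link_tnl_prepare(failing, tnl, FAILOVER_MSG, xmitq);
	tipc_link_fsm_evt(failing, LINK_FAILOVER_BEGIN_EVT);
}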
 
-/*
- * Receive protocol message :
+/* tipc_link_proto_rcv(): receive link level protocol message:
  * Note that network plane id propagates through the network, and may
- * change at any time. The node with lowest address rules
+ * change at any time. The node with lowest numerical id determines
+ * network plane
  */
-static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
-                               struct sk_buff *buf)
+static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
+                              struct sk_buff_head *xmitq)
 {
-       u32 rec_gap = 0;
-       u32 msg_tol;
-       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_msg *hdr = buf_msg(skb);
+       u16 rcvgap = 0;
+       u16 nacked_gap = msg_seq_gap(hdr);
+       u16 peers_snd_nxt =  msg_next_sent(hdr);
+       u16 peers_tol = msg_link_tolerance(hdr);
+       u16 peers_prio = msg_linkprio(hdr);
+       char *if_name;
+       int rc = 0;
 
-       if (l_ptr->flags & LINK_FAILINGOVER)
+       if (tipc_link_is_blocked(l))
                goto exit;
 
-       if (l_ptr->net_plane != msg_net_plane(msg))
-               if (link_own_addr(l_ptr) > msg_prevnode(msg))
-                       l_ptr->net_plane = msg_net_plane(msg);
-
-       switch (msg_type(msg)) {
+       if (link_own_addr(l) > msg_prevnode(hdr))
+               l->net_plane = msg_net_plane(hdr);
 
+       switch (msg_type(hdr)) {
        case RESET_MSG:
-               if (!link_working_unknown(l_ptr) &&
-                   (l_ptr->peer_session != INVALID_SESSION)) {
-                       if (less_eq(msg_session(msg), l_ptr->peer_session))
-                               break; /* duplicate or old reset: ignore */
-               }
-
-               if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
-                               link_working_unknown(l_ptr))) {
-                       /*
-                        * peer has lost contact -- don't allow peer's links
-                        * to reactivate before we recognize loss & clean up
-                        */
-                       l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
-               }
-
-               link_state_event(l_ptr, RESET_MSG);
 
+               /* Ignore duplicate RESET with old session number */
+               if ((less_eq(msg_session(hdr), l->peer_session)) &&
+                   (l->peer_session != WILDCARD_SESSION))
+                       break;
                /* fall thru' */
-       case ACTIVATE_MSG:
-               /* Update link settings according other endpoint's values */
-               strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
-
-               msg_tol = msg_link_tolerance(msg);
-               if (msg_tol > l_ptr->tolerance)
-                       link_set_supervision_props(l_ptr, msg_tol);
-
-               if (msg_linkprio(msg) > l_ptr->priority)
-                       l_ptr->priority = msg_linkprio(msg);
-
-               if (l_ptr->mtu > msg_max_pkt(msg))
-                       l_ptr->mtu = msg_max_pkt(msg);
 
-               /* Synchronize broadcast link info, if not done previously */
-               if (!tipc_node_is_up(l_ptr->owner)) {
-                       l_ptr->owner->bclink.last_sent =
-                               l_ptr->owner->bclink.last_in =
-                               msg_last_bcast(msg);
-                       l_ptr->owner->bclink.oos_state = 0;
-               }
-
-               l_ptr->peer_session = msg_session(msg);
-               l_ptr->peer_bearer_id = msg_bearer_id(msg);
-
-               if (msg_type(msg) == ACTIVATE_MSG)
-                       link_state_event(l_ptr, ACTIVATE_MSG);
-               break;
-       case STATE_MSG:
+       case ACTIVATE_MSG:
 
-               msg_tol = msg_link_tolerance(msg);
-               if (msg_tol)
-                       link_set_supervision_props(l_ptr, msg_tol);
-
-               if (msg_linkprio(msg) &&
-                   (msg_linkprio(msg) != l_ptr->priority)) {
-                       pr_debug("%s<%s>, priority change %u->%u\n",
-                                link_rst_msg, l_ptr->name,
-                                l_ptr->priority, msg_linkprio(msg));
-                       l_ptr->priority = msg_linkprio(msg);
-                       tipc_link_reset(l_ptr); /* Enforce change to take effect */
+               /* Complete own link name with peer's interface name */
+               if_name =  strrchr(l->name, ':') + 1;
+               if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
                        break;
-               }
-
-               /* Record reception; force mismatch at next timeout: */
-               l_ptr->silent_intv_cnt = 0;
-
-               link_state_event(l_ptr, TRAFFIC_MSG_EVT);
-               l_ptr->stats.recv_states++;
-               if (link_reset_unknown(l_ptr))
+               if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
                        break;
+               strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
 
-               if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
-                       rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
-
-               if (msg_probe(msg))
-                       l_ptr->stats.recv_probes++;
+               /* Update own tolerance if peer indicates a non-zero value */
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+                       l->tolerance = peers_tol;
 
-               /* Protocol message before retransmits, reduce loss risk */
-               if (l_ptr->owner->bclink.recv_permitted)
-                       tipc_bclink_update_link_state(l_ptr->owner,
-                                                     msg_last_bcast(msg));
+               /* Update own priority if peer's priority is higher */
+               if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
+                       l->priority = peers_prio;
 
-               if (rec_gap || (msg_probe(msg))) {
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0,
-                                            rec_gap, 0, 0);
-               }
-               if (msg_seq_gap(msg)) {
-                       l_ptr->stats.recv_nacks++;
-                       tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
-                                            msg_seq_gap(msg));
+               if (msg_type(hdr) == RESET_MSG) {
+                       rc |= tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+               } else if (!link_is_up(l)) {
+                       tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
+                       rc |= tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
                }
+               l->peer_session = msg_session(hdr);
+               l->peer_bearer_id = msg_bearer_id(hdr);
+               if (l->mtu > msg_max_pkt(hdr))
+                       l->mtu = msg_max_pkt(hdr);
                break;
-       }
-exit:
-       kfree_skb(buf);
-}
-
-
-/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
- * a different bearer. Owner node is locked.
- */
-static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
-                                 struct tipc_msg *tunnel_hdr,
-                                 struct tipc_msg *msg,
-                                 u32 selector)
-{
-       struct tipc_link *tunnel;
-       struct sk_buff *skb;
-       u32 length = msg_size(msg);
-
-       tunnel = l_ptr->owner->active_links[selector & 1];
-       if (!tipc_link_is_up(tunnel)) {
-               pr_warn("%stunnel link no longer available\n", link_co_err);
-               return;
-       }
-       msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-       skb = tipc_buf_acquire(length + INT_H_SIZE);
-       if (!skb) {
-               pr_warn("%sunable to send tunnel msg\n", link_co_err);
-               return;
-       }
-       skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
-       skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
-       __tipc_link_xmit_skb(tunnel, skb);
-}
-
 
-/* tipc_link_failover_send_queue(): A link has gone down, but a second
- * link is still active. We can do failover. Tunnel the failing link's
- * whole send queue via the remaining link. This way, we don't lose
- * any packets, and sequence order is preserved for subsequent traffic
- * sent over the remaining link. Owner node is locked.
- */
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
-{
-       int msgcount;
-       struct tipc_link *tunnel = l_ptr->owner->active_links[0];
-       struct tipc_msg tunnel_hdr;
-       struct sk_buff *skb;
-       int split_bundles;
-
-       if (!tunnel)
-               return;
-
-       tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
-                     FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
-
-       skb_queue_walk(&l_ptr->backlogq, skb) {
-               msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
-               l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
-       }
-       skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
-       tipc_link_purge_backlog(l_ptr);
-       msgcount = skb_queue_len(&l_ptr->transmq);
-       msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
-       msg_set_msgcnt(&tunnel_hdr, msgcount);
-
-       if (skb_queue_empty(&l_ptr->transmq)) {
-               skb = tipc_buf_acquire(INT_H_SIZE);
-               if (skb) {
-                       skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
-                       msg_set_size(&tunnel_hdr, INT_H_SIZE);
-                       __tipc_link_xmit_skb(tunnel, skb);
-               } else {
-                       pr_warn("%sunable to send changeover msg\n",
-                               link_co_err);
-               }
-               return;
-       }
-
-       split_bundles = (l_ptr->owner->active_links[0] !=
-                        l_ptr->owner->active_links[1]);
-
-       skb_queue_walk(&l_ptr->transmq, skb) {
-               struct tipc_msg *msg = buf_msg(skb);
+       case STATE_MSG:
 
-               if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
-                       struct tipc_msg *m = msg_get_wrapped(msg);
-                       unchar *pos = (unchar *)m;
+               /* Update own tolerance if peer indicates a non-zero value */
+               if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+                       l->tolerance = peers_tol;
 
-                       msgcount = msg_msgcnt(msg);
-                       while (msgcount--) {
-                               msg_set_seqno(m, msg_seqno(msg));
-                               tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
-                                                     msg_link_selector(m));
-                               pos += align(msg_size(m));
-                               m = (struct tipc_msg *)pos;
-                       }
-               } else {
-                       tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
-                                             msg_link_selector(msg));
-               }
-       }
-}
-
-/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
- * duplicate of the first link's send queue via the new link. This way, we
- * are guaranteed that currently queued packets from a socket are delivered
- * before future traffic from the same socket, even if this is using the
- * new link. The last arriving copy of each duplicate packet is dropped at
- * the receiving end by the regular protocol check, so packet cardinality
- * and sequence order is preserved per sender/receiver socket pair.
- * Owner node is locked.
- */
-void tipc_link_dup_queue_xmit(struct tipc_link *link,
-                             struct tipc_link *tnl)
-{
-       struct sk_buff *skb;
-       struct tipc_msg tnl_hdr;
-       struct sk_buff_head *queue = &link->transmq;
-       int mcnt;
-       u16 seqno;
-
-       tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
-                     SYNCH_MSG, INT_H_SIZE, link->addr);
-       mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
-       msg_set_msgcnt(&tnl_hdr, mcnt);
-       msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);
-
-tunnel_queue:
-       skb_queue_walk(queue, skb) {
-               struct sk_buff *outskb;
-               struct tipc_msg *msg = buf_msg(skb);
-               u32 len = msg_size(msg);
+               l->silent_intv_cnt = 0;
+               l->stats.recv_states++;
+               if (msg_probe(hdr))
+                       l->stats.recv_probes++;
+               rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+               if (!link_is_up(l))
+                       break;
 
-               msg_set_ack(msg, mod(link->rcv_nxt - 1));
-               msg_set_bcast_ack(msg, link->owner->bclink.last_in);
-               msg_set_size(&tnl_hdr, len + INT_H_SIZE);
-               outskb = tipc_buf_acquire(len + INT_H_SIZE);
-               if (outskb == NULL) {
-                       pr_warn("%sunable to send duplicate msg\n",
-                               link_co_err);
-                       return;
+               /* Send NACK if peer has sent pkts we haven't received yet */
+               if (more(peers_snd_nxt, l->rcv_nxt))
+                       rcvgap = peers_snd_nxt - l->rcv_nxt;
+               if (rcvgap || (msg_probe(hdr)))
+                       tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
+                                                 0, 0, xmitq);
+               tipc_link_release_pkts(l, msg_ack(hdr));
+
+               /* If NACK, retransmit will now start at right position */
+               if (nacked_gap) {
+                       rc = tipc_link_retransm(l, nacked_gap, xmitq);
+                       l->stats.recv_nacks++;
                }
-               skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
-                                              skb->data, len);
-               __tipc_link_xmit_skb(tnl, outskb);
-               if (!tipc_link_is_up(link))
-                       return;
-       }
-       if (queue == &link->backlogq)
-               return;
-       seqno = link->snd_nxt;
-       skb_queue_walk(&link->backlogq, skb) {
-               msg_set_seqno(buf_msg(skb), seqno);
-               seqno = mod(seqno + 1);
-       }
-       queue = &link->backlogq;
-       goto tunnel_queue;
-}
 
-/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
- *  Owner node is locked.
- */
-static bool tipc_link_failover_rcv(struct tipc_link *link,
-                                  struct sk_buff **skb)
-{
-       struct tipc_msg *msg = buf_msg(*skb);
-       struct sk_buff *iskb = NULL;
-       struct tipc_link *pl = NULL;
-       int bearer_id = msg_bearer_id(msg);
-       int pos = 0;
-
-       if (msg_type(msg) != FAILOVER_MSG) {
-               pr_warn("%sunknown tunnel pkt received\n", link_co_err);
-               goto exit;
-       }
-       if (bearer_id >= MAX_BEARERS)
-               goto exit;
-
-       if (bearer_id == link->bearer_id)
-               goto exit;
-
-       pl = link->owner->links[bearer_id];
-       if (pl && tipc_link_is_up(pl))
-               tipc_link_reset(pl);
-
-       if (link->failover_pkts == FIRST_FAILOVER)
-               link->failover_pkts = msg_msgcnt(msg);
-
-       /* Should we expect an inner packet? */
-       if (!link->failover_pkts)
-               goto exit;
-
-       if (!tipc_msg_extract(*skb, &iskb, &pos)) {
-               pr_warn("%sno inner failover pkt\n", link_co_err);
-               *skb = NULL;
-               goto exit;
-       }
-       link->failover_pkts--;
-       *skb = NULL;
-
-       /* Was this packet already delivered? */
-       if (less(buf_seqno(iskb), link->failover_checkpt)) {
-               kfree_skb(iskb);
-               iskb = NULL;
-               goto exit;
-       }
-       if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
-               link->stats.recv_fragments++;
-               tipc_buf_append(&link->failover_skb, &iskb);
+               tipc_link_advance_backlog(l, xmitq);
+               if (unlikely(!skb_queue_empty(&l->wakeupq)))
+                       link_prepare_wakeup(l);
        }
 exit:
-       if (!link->failover_pkts && pl)
-               pl->flags &= ~LINK_FAILINGOVER;
-       kfree_skb(*skb);
-       *skb = iskb;
-       return *skb;
-}
-
-static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
-{
-       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
-
-       if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
-               return;
-
-       l_ptr->tolerance = tol;
-       l_ptr->keepalive_intv = msecs_to_jiffies(intv);
-       l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
+       kfree_skb(skb);
+       return rc;
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1743,7 +1449,7 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
        list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
                tipc_node_lock(n_ptr);
                for (i = 0; i < MAX_BEARERS; i++) {
-                       l_ptr = n_ptr->links[i];
+                       l_ptr = n_ptr->links[i].link;
                        if (l_ptr && !strcmp(l_ptr->name, link_name)) {
                                *bearer_id = i;
                                found_node = n_ptr;
@@ -1770,27 +1476,16 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
        l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
-static void link_print(struct tipc_link *l_ptr, const char *str)
+static void link_print(struct tipc_link *l, const char *str)
 {
-       struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
-       struct tipc_bearer *b_ptr;
-
-       rcu_read_lock();
-       b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-       if (b_ptr)
-               pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
-       rcu_read_unlock();
-
-       if (link_working_unknown(l_ptr))
-               pr_cont(":WU\n");
-       else if (link_reset_reset(l_ptr))
-               pr_cont(":RR\n");
-       else if (link_reset_unknown(l_ptr))
-               pr_cont(":RU\n");
-       else if (link_working_working(l_ptr))
-               pr_cont(":WW\n");
-       else
-               pr_cont("\n");
+       struct sk_buff *hskb = skb_peek(&l->transmq);
+       u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
+       u16 tail = l->snd_nxt - 1;
+
+       pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
+       pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
+               skb_queue_len(&l->transmq), head, tail,
+               skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
 }
 
 /* Parse and validate nested (link) properties valid for media, bearer and link
@@ -1865,7 +1560,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
        tipc_node_lock(node);
 
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (!link) {
                res = -EINVAL;
                goto out;
@@ -1885,7 +1580,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
                        u32 tol;
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-                       link_set_supervision_props(link, tol);
+                       link->tolerance = tol;
                        tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
@@ -2055,10 +1750,11 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
        for (i = *prev_link; i < MAX_BEARERS; i++) {
                *prev_link = i;
 
-               if (!node->links[i])
+               if (!node->links[i].link)
                        continue;
 
-               err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
+               err = __tipc_nl_add_link(net, msg,
+                                        node->links[i].link, NLM_F_MULTI);
                if (err)
                        return err;
        }
@@ -2172,7 +1868,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
                        return -EINVAL;
 
                tipc_node_lock(node);
-               link = node->links[bearer_id];
+               link = node->links[bearer_id].link;
                if (!link) {
                        tipc_node_unlock(node);
                        nlmsg_free(msg.skb);
@@ -2227,7 +1923,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
 
        tipc_node_lock(node);
 
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (!link) {
                tipc_node_unlock(node);
                return -EINVAL;
index ae0a0ea572f2961aca2617f9244ea74ebba15c6a..39ff8b6919a4271d31892992098be0fcf98acd0f 100644 (file)
  */
 #define INVALID_LINK_SEQ 0x10000
 
-/* Link working states
+/* Link FSM events:
  */
-#define WORKING_WORKING 560810u
-#define WORKING_UNKNOWN 560811u
-#define RESET_UNKNOWN   560812u
-#define RESET_RESET     560813u
+enum {
+       LINK_ESTABLISH_EVT       = 0xec1ab1e,
+       LINK_PEER_RESET_EVT      = 0x9eed0e,
+       LINK_FAILURE_EVT         = 0xfa110e,
+       LINK_RESET_EVT           = 0x10ca1d0e,
+       LINK_FAILOVER_BEGIN_EVT  = 0xfa110bee,
+       LINK_FAILOVER_END_EVT    = 0xfa110ede,
+       LINK_SYNCH_BEGIN_EVT     = 0xc1ccbee,
+       LINK_SYNCH_END_EVT       = 0xc1ccede
+};
 
-/* Link endpoint execution states
+/* Events returned from link at packet reception or at timeout
  */
-#define LINK_STARTED     0x0001
-#define LINK_STOPPED     0x0002
-#define LINK_SYNCHING    0x0004
-#define LINK_FAILINGOVER 0x0008
+enum {
+       TIPC_LINK_UP_EVT       = 1,
+       TIPC_LINK_DOWN_EVT     = (1 << 1)
+};
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -106,7 +112,6 @@ struct tipc_stats {
  * @timer: link timer
  * @owner: pointer to peer node
  * @refcnt: reference counter for permanent references (owner node & timer)
- * @flags: execution state flags for link endpoint instance
  * @peer_session: link session # being used by peer end of link
  * @peer_bearer_id: bearer id used by link's peer endpoint
  * @bearer_id: local bearer id used by link
@@ -143,20 +148,17 @@ struct tipc_stats {
 struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
-       struct tipc_media_addr media_addr;
-       struct timer_list timer;
+       struct tipc_media_addr *media_addr;
        struct tipc_node *owner;
-       struct kref ref;
 
        /* Management and link supervision data */
-       unsigned int flags;
        u32 peer_session;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        unsigned long keepalive_intv;
        u32 abort_limit;
-       int state;
+       u32 state;
        u32 silent_intv_cnt;
        struct {
                unchar hdr[INT_H_SIZE];
@@ -165,12 +167,10 @@ struct tipc_link {
        struct tipc_msg *pmsg;
        u32 priority;
        char net_plane;
-       u16 synch_point;
 
-       /* Failover */
-       u16 failover_pkts;
-       u16 failover_checkpt;
-       struct sk_buff *failover_skb;
+       /* Failover/synch */
+       u16 drop_point;
+       struct sk_buff *failover_reasm_skb;
 
        /* Max packet negotiation */
        u16 mtu;
@@ -192,8 +192,8 @@ struct tipc_link {
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
-       struct sk_buff_head inputq;
-       struct sk_buff_head namedq;
+       struct sk_buff_head *inputq;
+       struct sk_buff_head *namedq;
 
        /* Congestion handling */
        struct sk_buff_head wakeupq;
@@ -205,28 +205,29 @@ struct tipc_link {
        struct tipc_stats stats;
 };
 
-struct tipc_port;
-
-struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
-                             struct tipc_bearer *b_ptr,
-                             const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *link);
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id);
-void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
+bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
+                     u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
+                     struct sk_buff_head *inputq, struct sk_buff_head *namedq,
+                     struct tipc_link **link);
+void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
+                          int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq);
+int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 void tipc_link_reset_fragments(struct tipc_link *l_ptr);
-int tipc_link_is_up(struct tipc_link *l_ptr);
+bool tipc_link_is_up(struct tipc_link *l);
+bool tipc_link_is_reset(struct tipc_link *l);
+bool tipc_link_is_synching(struct tipc_link *l);
+bool tipc_link_is_failingover(struct tipc_link *l);
+bool tipc_link_is_blocked(struct tipc_link *l);
 int tipc_link_is_active(struct tipc_link *l_ptr);
 void tipc_link_purge_queues(struct tipc_link *l_ptr);
 void tipc_link_purge_backlog(struct tipc_link *l);
-void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
-                      u32 selector);
-int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
-                  u32 selector);
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list);
+int tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list,
+                  struct sk_buff_head *xmitq);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority);
 void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -242,34 +243,8 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
-void link_prepare_wakeup(struct tipc_link *l);
-
-static inline u32 link_own_addr(struct tipc_link *l)
-{
-       return msg_prevnode(l->pmsg);
-}
-
-/*
- * Link status checking routines
- */
-static inline int link_working_working(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == WORKING_WORKING;
-}
-
-static inline int link_working_unknown(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == WORKING_UNKNOWN;
-}
-
-static inline int link_reset_unknown(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == RESET_UNKNOWN;
-}
-
-static inline int link_reset_reset(struct tipc_link *l_ptr)
-{
-       return l_ptr->state == RESET_RESET;
-}
+int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq);
+int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
+                 struct sk_buff_head *xmitq);
 
 #endif
index 08b4cc7d496d94c80fb2fcacc30ade0192fb0dc8..562c926a51cc7baa859115b6a0d444f73febf357 100644 (file)
@@ -463,60 +463,72 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
 
 /**
  * tipc_msg_reverse(): swap source and destination addresses and add error code
- * @buf:  buffer containing message to be reversed
- * @dnode: return value: node where to send message after reversal
- * @err:  error code to be set in message
- * Consumes buffer if failure
+ * @own_node: originating node id for reversed message
+ * @skb:  buffer containing message to be reversed; may be replaced.
+ * @err:  error code to be set in message, if any
+ * Consumes buffer on failure
  * Returns true on success, otherwise false
  */
-bool tipc_msg_reverse(u32 own_addr,  struct sk_buff *buf, u32 *dnode,
-                     int err)
+bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
-       struct tipc_msg *msg = buf_msg(buf);
+       struct sk_buff *_skb = *skb;
+       struct tipc_msg *hdr = buf_msg(_skb);
        struct tipc_msg ohdr;
-       uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+       int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
 
-       if (skb_linearize(buf))
+       if (skb_linearize(_skb))
                goto exit;
-       msg = buf_msg(buf);
-       if (msg_dest_droppable(msg))
+       hdr = buf_msg(_skb);
+       if (msg_dest_droppable(hdr))
                goto exit;
-       if (msg_errcode(msg))
+       if (msg_errcode(hdr))
                goto exit;
-       memcpy(&ohdr, msg, msg_hdr_sz(msg));
-       msg_set_errcode(msg, err);
-       msg_set_origport(msg, msg_destport(&ohdr));
-       msg_set_destport(msg, msg_origport(&ohdr));
-       msg_set_prevnode(msg, own_addr);
-       if (!msg_short(msg)) {
-               msg_set_orignode(msg, msg_destnode(&ohdr));
-               msg_set_destnode(msg, msg_orignode(&ohdr));
+
+       /* Take a copy of original header before altering message */
+       memcpy(&ohdr, hdr, msg_hdr_sz(hdr));
+
+       /* Never return SHORT header; expand by replacing buffer if necessary */
+       if (msg_short(hdr)) {
+               *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+               if (!*skb)
+                       goto exit;
+               memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
+               kfree_skb(_skb);
+               _skb = *skb;
+               hdr = buf_msg(_skb);
+               memcpy(hdr, &ohdr, BASIC_H_SIZE);
+               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
-       msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
-       skb_trim(buf, msg_size(msg));
-       skb_orphan(buf);
-       *dnode = msg_orignode(&ohdr);
+
+       /* Now reverse the concerned fields */
+       msg_set_errcode(hdr, err);
+       msg_set_origport(hdr, msg_destport(&ohdr));
+       msg_set_destport(hdr, msg_origport(&ohdr));
+       msg_set_destnode(hdr, msg_prevnode(&ohdr));
+       msg_set_prevnode(hdr, own_node);
+       msg_set_orignode(hdr, own_node);
+       msg_set_size(hdr, msg_hdr_sz(hdr) + dlen);
+       skb_trim(_skb, msg_size(hdr));
+       skb_orphan(_skb);
        return true;
 exit:
-       kfree_skb(buf);
-       *dnode = 0;
+       kfree_skb(_skb);
+       *skb = NULL;
        return false;
 }
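
Because the reversal may replace the buffer (SHORT headers are expanded into a new skb) and consumes it on failure, callers must pass the skb by reference and only use the pointer they get back. A hedged sketch of a rejection path (the selector argument to tipc_node_xmit_skb() is illustrative):

/* Bounce an undeliverable message back to its sender with an error
 * code; if the reversal fails the buffer has already been freed.
 */
static void demo_reject(struct net *net, struct sk_buff *skb)
{
	struct tipc_msg *hdr;

	if (!tipc_msg_reverse(tipc_own_addr(net), &skb, TIPC_ERR_NO_PORT))
		return;
	hdr = buf_msg(skb);
	tipc_node_xmit_skb(net, skb, msg_destnode(hdr), msg_origport(hdr));
}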
 
 /**
  * tipc_msg_lookup_dest(): try to find new destination for named message
  * @skb: the buffer containing the message.
- * @dnode: return value: next-hop node, if destination found
- * @err: return value: error code to use, if message to be rejected
+ * @err: error code to be used by caller if lookup fails
  * Does not consume buffer
  * Returns true if a destination is found, false otherwise
  */
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
-                         u32 *dnode, int *err)
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
 {
        struct tipc_msg *msg = buf_msg(skb);
-       u32 dport;
-       u32 own_addr = tipc_own_addr(net);
+       u32 dport, dnode;
+       u32 onode = tipc_own_addr(net);
 
        if (!msg_isdata(msg))
                return false;
@@ -529,15 +541,15 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
                return false;
        if (msg_reroute_cnt(msg))
                return false;
-       *dnode = addr_domain(net, msg_lookup_scope(msg));
+       dnode = addr_domain(net, msg_lookup_scope(msg));
        dport = tipc_nametbl_translate(net, msg_nametype(msg),
-                                      msg_nameinst(msg), dnode);
+                                      msg_nameinst(msg), &dnode);
        if (!dport)
                return false;
        msg_incr_reroute_cnt(msg);
-       if (*dnode != own_addr)
-               msg_set_prevnode(msg, own_addr);
-       msg_set_destnode(msg, *dnode);
+       if (dnode != onode)
+               msg_set_prevnode(msg, onode);
+       msg_set_destnode(msg, dnode);
        msg_set_destport(msg, dport);
        *err = TIPC_OK;
        return true;
index 19c45fb66238816f6084a706e9ab7784a31faa8c..a82c5848d4bc22129bd1e6ba7f677795febdc9e9 100644 (file)
@@ -38,6 +38,7 @@
 #define _TIPC_MSG_H
 
 #include <linux/tipc.h>
+#include "core.h"
 
 /*
  * Constants and routines used to read and write TIPC payload message headers
@@ -109,7 +110,6 @@ struct tipc_skb_cb {
        struct sk_buff *tail;
        bool validated;
        bool wakeup_pending;
-       bool bundling;
        u16 chain_sz;
        u16 chain_imp;
 };
@@ -558,15 +558,6 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 1, 15, 0x1fff, n);
 }
 
-static inline bool msg_dup(struct tipc_msg *m)
-{
-       if (likely(msg_user(m) != TUNNEL_PROTOCOL))
-               return false;
-       if (msg_type(m) != SYNCH_MSG)
-               return false;
-       return true;
-}
-
 /*
  * Word 2
  */
@@ -620,12 +611,12 @@ static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
 }
 
 
-static inline u32 msg_next_sent(struct tipc_msg *m)
+static inline u16 msg_next_sent(struct tipc_msg *m)
 {
        return msg_bits(m, 4, 0, 0xffff);
 }
 
-static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+static inline void msg_set_next_sent(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 4, 0, 0xffff, n);
 }
@@ -658,12 +649,12 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
 /*
  * Word 5
  */
-static inline u32 msg_session(struct tipc_msg *m)
+static inline u16 msg_session(struct tipc_msg *m)
 {
        return msg_bits(m, 5, 16, 0xffff);
 }
 
-static inline void msg_set_session(struct tipc_msg *m, u32 n)
+static inline void msg_set_session(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 5, 16, 0xffff, n);
 }
@@ -726,12 +717,12 @@ static inline char *msg_media_addr(struct tipc_msg *m)
 /*
  * Word 9
  */
-static inline u32 msg_msgcnt(struct tipc_msg *m)
+static inline u16 msg_msgcnt(struct tipc_msg *m)
 {
        return msg_bits(m, 9, 16, 0xffff);
 }
 
-static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+static inline void msg_set_msgcnt(struct tipc_msg *m, u16 n)
 {
        msg_set_bits(m, 9, 16, 0xffff, n);
 }
@@ -766,10 +757,25 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
+static inline bool msg_peer_link_is_up(struct tipc_msg *m)
+{
+       if (likely(msg_user(m) != LINK_PROTOCOL))
+               return true;
+       if (msg_type(m) == STATE_MSG)
+               return true;
+       return false;
+}
+
+static inline bool msg_peer_node_is_up(struct tipc_msg *m)
+{
+       if (msg_peer_link_is_up(m))
+               return true;
+       return msg_redundant_link(m);
+}
+
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
-bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
-                     int err);
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
                   u32 hsize, u32 destnode);
 struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
@@ -782,8 +788,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
 bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
 int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
-bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
-                         int *err);
+bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 
 static inline u16 buf_seqno(struct sk_buff *skb)
@@ -857,26 +862,65 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
        return skb;
 }
 
-/* tipc_skb_queue_tail(): add buffer to tail of list;
+/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
  * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
+ * @skb: buffer to add
+ * Returns true if the queue should be treated further, otherwise false
  */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
-                                      struct sk_buff *skb, u32 dport)
+static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
+                                          struct sk_buff *skb)
 {
-       struct sk_buff *_skb = NULL;
-       bool rv = false;
+       struct sk_buff *_skb, *tmp;
+       struct tipc_msg *hdr = buf_msg(skb);
+       u16 seqno = msg_seqno(hdr);
 
-       spin_lock_bh(&list->lock);
-       _skb = skb_peek_tail(list);
-       if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
-           (skb_queue_len(list) > 32))
-               rv = true;
+       if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
+               __skb_queue_head(list, skb);
+               return true;
+       }
+       if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
+               __skb_queue_head(list, skb);
+               return true;
+       }
+       if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
+               skb_queue_walk_safe(list, _skb, tmp) {
+                       if (likely(less(seqno, buf_seqno(_skb)))) {
+                               __skb_queue_before(list, _skb, skb);
+                               return true;
+                       }
+               }
+       }
        __skb_queue_tail(list, skb);
+       return false;
+}
+
+/* tipc_skb_queue_splice_tail - append an skb list to lock protected list
+ * @list: the new list to append. Not lock protected
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail(struct sk_buff_head *list,
+                                             struct sk_buff_head *head)
+{
+       spin_lock_bh(&head->lock);
+       skb_queue_splice_tail(list, head);
+       spin_unlock_bh(&head->lock);
+}
+
+/* tipc_skb_queue_splice_tail_init - merge two lock protected skb lists
+ * @list: the new list to add. Lock protected. Will be reinitialized
+ * @head: target list. Lock protected.
+ */
+static inline void tipc_skb_queue_splice_tail_init(struct sk_buff_head *list,
+                                                  struct sk_buff_head *head)
+{
+       struct sk_buff_head tmp;
+
+       __skb_queue_head_init(&tmp);
+
+       spin_lock_bh(&list->lock);
+       skb_queue_splice_tail_init(list, &tmp);
        spin_unlock_bh(&list->lock);
-       return rv;
+       tipc_skb_queue_splice_tail(&tmp, head);
 }
 
 #endif
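
Editorial note: the u16 conversions above (msg_next_sent, msg_session, msg_msgcnt) and the new __tipc_skb_queue_sorted() helper all work on 16-bit sequence numbers that wrap around, compared via TIPC's less()/more() helpers. A minimal user-space sketch of that style of mod-2^16 comparison, assuming semantics along the lines of the helpers in net/tipc/msg.h (illustration only, not the kernel's exact code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true when a precedes b in mod-2^16 ("serial number") arithmetic */
static bool seq_less(uint16_t a, uint16_t b)
{
        uint16_t diff = (uint16_t)(b - a);

        return diff != 0 && diff < 0x8000;
}

int main(void)
{
        printf("%d\n", seq_less(0xfffe, 0x0001));  /* 1: precedes across the wrap */
        printf("%d\n", seq_less(0x0001, 0xfffe));  /* 0 */
        return 0;
}
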
index 41e7b7e4dda0818469c17ff8b6e48aa1654a23ff..e6018b7eb1970dfc85bc7e0dc8945ccf45a72180 100644 (file)
@@ -96,13 +96,13 @@ void named_cluster_distribute(struct net *net, struct sk_buff *skb)
                dnode = node->addr;
                if (in_own_node(net, dnode))
                        continue;
-               if (!tipc_node_active_links(node))
+               if (!tipc_node_is_up(node))
                        continue;
                oskb = pskb_copy(skb, GFP_ATOMIC);
                if (!oskb)
                        break;
                msg_set_destnode(buf_msg(oskb), dnode);
-               tipc_link_xmit_skb(net, oskb, dnode, dnode);
+               tipc_node_xmit_skb(net, oskb, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -223,7 +223,7 @@ void tipc_named_node_up(struct net *net, u32 dnode)
                         &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
        rcu_read_unlock();
 
-       tipc_link_xmit(net, &head, dnode, dnode);
+       tipc_node_xmit(net, &head, dnode, dnode);
 }
 
 static void tipc_publ_subscribe(struct net *net, struct publication *publ,
index 0b1d61a5f85334b3553780e8c0dd64c3f0549aa3..7c191641b44f64c080745df6615a8eccb237dd38 100644 (file)
 #include "name_distr.h"
 #include "socket.h"
 #include "bcast.h"
+#include "discover.h"
 
-static void node_lost_contact(struct tipc_node *n_ptr);
+/* Node FSM states and events:
+ */
+enum {
+       SELF_DOWN_PEER_DOWN    = 0xdd,
+       SELF_UP_PEER_UP        = 0xaa,
+       SELF_DOWN_PEER_LEAVING = 0xd1,
+       SELF_UP_PEER_COMING    = 0xac,
+       SELF_COMING_PEER_UP    = 0xca,
+       SELF_LEAVING_PEER_DOWN = 0x1d,
+       NODE_FAILINGOVER       = 0xf0,
+       NODE_SYNCHING          = 0xcc
+};
+
+enum {
+       SELF_ESTABL_CONTACT_EVT = 0xece,
+       SELF_LOST_CONTACT_EVT   = 0x1ce,
+       PEER_ESTABL_CONTACT_EVT = 0x9ece,
+       PEER_LOST_CONTACT_EVT   = 0x91ce,
+       NODE_FAILOVER_BEGIN_EVT = 0xfbe,
+       NODE_FAILOVER_END_EVT   = 0xfee,
+       NODE_SYNCH_BEGIN_EVT    = 0xcbe,
+       NODE_SYNCH_END_EVT      = 0xcee
+};
+
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+                                 struct sk_buff_head *xmitq,
+                                 struct tipc_media_addr **maddr);
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
+                               bool delete);
+static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
 static void node_established_contact(struct tipc_node *n_ptr);
 static void tipc_node_delete(struct tipc_node *node);
+static void tipc_node_timeout(unsigned long data);
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
 
 struct tipc_sock_conn {
        u32 port;
@@ -110,7 +142,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
        return NULL;
 }
 
-struct tipc_node *tipc_node_create(struct net *net, u32 addr)
+struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_node *n_ptr, *temp_node;
@@ -126,12 +158,14 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
        }
        n_ptr->addr = addr;
        n_ptr->net = net;
+       n_ptr->capabilities = capabilities;
        kref_init(&n_ptr->kref);
        spin_lock_init(&n_ptr->lock);
        INIT_HLIST_NODE(&n_ptr->hash);
        INIT_LIST_HEAD(&n_ptr->list);
        INIT_LIST_HEAD(&n_ptr->publ_list);
        INIT_LIST_HEAD(&n_ptr->conn_sks);
+       skb_queue_head_init(&n_ptr->bclink.namedq);
        __skb_queue_head_init(&n_ptr->bclink.deferdq);
        hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
        list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
@@ -139,14 +173,32 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
                        break;
        }
        list_add_tail_rcu(&n_ptr->list, &temp_node->list);
-       n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
+       n_ptr->state = SELF_DOWN_PEER_LEAVING;
        n_ptr->signature = INVALID_NODE_SIG;
+       n_ptr->active_links[0] = INVALID_BEARER_ID;
+       n_ptr->active_links[1] = INVALID_BEARER_ID;
        tipc_node_get(n_ptr);
+       setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
+       n_ptr->keepalive_intv = U32_MAX;
 exit:
        spin_unlock_bh(&tn->node_list_lock);
        return n_ptr;
 }
 
+static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
+{
+       unsigned long tol = l->tolerance;
+       unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
+       unsigned long keepalive_intv = msecs_to_jiffies(intv);
+
+       /* Link with lowest tolerance determines timer interval */
+       if (keepalive_intv < n->keepalive_intv)
+               n->keepalive_intv = keepalive_intv;
+
+       /* Ensure link's abort limit corresponds to current interval */
+       l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv);
+}
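/*
 * Worked example for the calculation above (editorial illustration, not
 * part of the patch): with the usual default link tolerance of 1500 ms,
 * tol / 4 = 375 ms < 500 ms, so the proposed keepalive interval is 375 ms
 * and abort_limit becomes 1500 / 375 = 4 timer ticks. If a second link
 * has tolerance 4000 ms, the node timer keeps running at the lower 375 ms
 * interval (lowest tolerance wins) and that link gets
 * abort_limit = 4000 / 375 ~= 10.
 */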
+
 static void tipc_node_delete(struct tipc_node *node)
 {
        list_del_rcu(&node->list);
@@ -160,8 +212,11 @@ void tipc_node_stop(struct net *net)
        struct tipc_node *node, *t_node;
 
        spin_lock_bh(&tn->node_list_lock);
-       list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+       list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
+               if (del_timer(&node->timer))
+                       tipc_node_put(node);
                tipc_node_put(node);
+       }
        spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -219,158 +274,547 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
        tipc_node_put(node);
 }
 
+/* tipc_node_timeout - handle expiration of node timer
+ */
+static void tipc_node_timeout(unsigned long data)
+{
+       struct tipc_node *n = (struct tipc_node *)data;
+       struct tipc_link_entry *le;
+       struct sk_buff_head xmitq;
+       int bearer_id;
+       int rc = 0;
+
+       __skb_queue_head_init(&xmitq);
+
+       for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
+               tipc_node_lock(n);
+               le = &n->links[bearer_id];
+               if (le->link) {
+                       /* Link tolerance may change asynchronously: */
+                       tipc_node_calculate_timer(n, le->link);
+                       rc = tipc_link_timeout(le->link, &xmitq);
+               }
+               tipc_node_unlock(n);
+               tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
+               if (rc & TIPC_LINK_DOWN_EVT)
+                       tipc_node_link_down(n, bearer_id, false);
+       }
+       if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+               tipc_node_get(n);
+       tipc_node_put(n);
+}
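/*
 * Editorial sketch (not part of the patch) of the locking pattern used by
 * tipc_node_timeout() above and by several functions below: packets
 * produced while holding the node lock are parked on an on-stack queue
 * and only handed to the bearer after the lock is dropped, so the bearer
 * layer is never entered under the node spinlock. The function name is
 * hypothetical.
 */
static void example_produce_and_xmit(struct tipc_node *n, int bearer_id)
{
        struct sk_buff_head xmitq;

        __skb_queue_head_init(&xmitq);  /* on-stack queue, no locking needed */
        tipc_node_lock(n);
        /* ... build protocol packets onto &xmitq while serialized ... */
        tipc_node_unlock(n);
        tipc_bearer_xmit(n->net, bearer_id, &xmitq, &n->links[bearer_id].maddr);
}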
+
 /**
- * tipc_node_link_up - handle addition of link
- *
+ * __tipc_node_link_up - handle addition of link
+ * Node lock must be held by caller
  * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
+                               struct sk_buff_head *xmitq)
 {
-       struct tipc_link **active = &n_ptr->active_links[0];
+       int *slot0 = &n->active_links[0];
+       int *slot1 = &n->active_links[1];
+       struct tipc_link *ol = node_active_link(n, 0);
+       struct tipc_link *nl = n->links[bearer_id].link;
 
-       n_ptr->working_links++;
-       n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
-       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+       if (!nl || !tipc_link_is_up(nl))
+               return;
 
-       pr_debug("Established link <%s> on network plane %c\n",
-                l_ptr->name, l_ptr->net_plane);
+       n->working_links++;
+       n->action_flags |= TIPC_NOTIFY_LINK_UP;
+       n->link_id = nl->peer_bearer_id << 16 | bearer_id;
 
-       if (!active[0]) {
-               active[0] = active[1] = l_ptr;
-               node_established_contact(n_ptr);
-               goto exit;
-       }
-       if (l_ptr->priority < active[0]->priority) {
-               pr_debug("New link <%s> becomes standby\n", l_ptr->name);
-               goto exit;
+       /* Leave room for tunnel header when returning 'mtu' to users: */
+       n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE;
+
+       tipc_bearer_add_dest(n->net, bearer_id, n->addr);
+
+       pr_debug("Established link <%s> on network plane %c\n",
+                nl->name, nl->net_plane);
+
+       /* First link? => give it both slots */
+       if (!ol) {
+               *slot0 = bearer_id;
+               *slot1 = bearer_id;
+               tipc_link_build_bcast_sync_msg(nl, xmitq);
+               node_established_contact(n);
+               return;
        }
-       tipc_link_dup_queue_xmit(active[0], l_ptr);
-       if (l_ptr->priority == active[0]->priority) {
-               active[0] = l_ptr;
-               goto exit;
+
+       /* Second link => redistribute slots */
+       if (nl->priority > ol->priority) {
+               pr_debug("Old link <%s> becomes standby\n", ol->name);
+               *slot0 = bearer_id;
+               *slot1 = bearer_id;
+       } else if (nl->priority == ol->priority) {
+               *slot0 = bearer_id;
+       } else {
+               pr_debug("New link <%s> is standby\n", nl->name);
        }
-       pr_debug("Old link <%s> becomes standby\n", active[0]->name);
-       if (active[1] != active[0])
-               pr_debug("Old link <%s> becomes standby\n", active[1]->name);
-       active[0] = active[1] = l_ptr;
-exit:
-       /* Leave room for changeover header when returning 'mtu' to users: */
-       n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-       n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+
+       /* Prepare synchronization with first link */
+       tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
 }
 
 /**
- * node_select_active_links - select active link
+ * tipc_node_link_up - handle addition of link
+ *
+ * Link becomes active (alone or shared) or standby, depending on its priority.
  */
-static void node_select_active_links(struct tipc_node *n_ptr)
+static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
+                             struct sk_buff_head *xmitq)
 {
-       struct tipc_link **active = &n_ptr->active_links[0];
-       u32 i;
-       u32 highest_prio = 0;
+       tipc_node_lock(n);
+       __tipc_node_link_up(n, bearer_id, xmitq);
+       tipc_node_unlock(n);
+}
 
-       active[0] = active[1] = NULL;
+/**
+ * __tipc_node_link_down - handle loss of link
+ */
+static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+                                 struct sk_buff_head *xmitq,
+                                 struct tipc_media_addr **maddr)
+{
+       struct tipc_link_entry *le = &n->links[*bearer_id];
+       int *slot0 = &n->active_links[0];
+       int *slot1 = &n->active_links[1];
+       int i, highest = 0;
+       struct tipc_link *l, *_l, *tnl;
+
+       l = n->links[*bearer_id].link;
+       if (!l || tipc_link_is_reset(l))
+               return;
 
-       for (i = 0; i < MAX_BEARERS; i++) {
-               struct tipc_link *l_ptr = n_ptr->links[i];
+       n->working_links--;
+       n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
+       n->link_id = l->peer_bearer_id << 16 | *bearer_id;
 
-               if (!l_ptr || !tipc_link_is_up(l_ptr) ||
-                   (l_ptr->priority < highest_prio))
-                       continue;
+       tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
+
+       pr_debug("Lost link <%s> on network plane %c\n",
+                l->name, l->net_plane);
 
-               if (l_ptr->priority > highest_prio) {
-                       highest_prio = l_ptr->priority;
-                       active[0] = active[1] = l_ptr;
-               } else {
-                       active[1] = l_ptr;
+       /* Select new active link if any available */
+       *slot0 = INVALID_BEARER_ID;
+       *slot1 = INVALID_BEARER_ID;
+       for (i = 0; i < MAX_BEARERS; i++) {
+               _l = n->links[i].link;
+               if (!_l || !tipc_link_is_up(_l))
+                       continue;
+               if (_l == l)
+                       continue;
+               if (_l->priority < highest)
+                       continue;
+               if (_l->priority > highest) {
+                       highest = _l->priority;
+                       *slot0 = i;
+                       *slot1 = i;
+                       continue;
                }
+               *slot1 = i;
+       }
+
+       if (!tipc_node_is_up(n)) {
+               tipc_link_reset(l);
+               node_lost_contact(n, &le->inputq);
+               return;
        }
+
+       /* There is still a working link => initiate failover */
+       tnl = node_active_link(n, 0);
+       n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
+       tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+       tipc_link_reset(l);
+       tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+       tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
+       *maddr = &n->links[tnl->bearer_id].maddr;
+       *bearer_id = tnl->bearer_id;
 }
 
-/**
- * tipc_node_link_down - handle loss of link
- */
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
-       struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
-       struct tipc_link **active;
+       struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_media_addr *maddr;
+       struct sk_buff_head xmitq;
+
+       __skb_queue_head_init(&xmitq);
+
+       tipc_node_lock(n);
+       __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
+       if (delete && le->link) {
+               kfree(le->link);
+               le->link = NULL;
+               n->link_cnt--;
+       }
+       tipc_node_unlock(n);
 
-       n_ptr->working_links--;
-       n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
-       n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
+       tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+       tipc_sk_rcv(n->net, &le->inputq);
+}
 
-       if (!tipc_link_is_active(l_ptr)) {
-               pr_debug("Lost standby link <%s> on network plane %c\n",
-                        l_ptr->name, l_ptr->net_plane);
-               return;
-       }
-       pr_debug("Lost link <%s> on network plane %c\n",
-                l_ptr->name, l_ptr->net_plane);
-
-       active = &n_ptr->active_links[0];
-       if (active[0] == l_ptr)
-               active[0] = active[1];
-       if (active[1] == l_ptr)
-               active[1] = active[0];
-       if (active[0] == l_ptr)
-               node_select_active_links(n_ptr);
-       if (tipc_node_is_up(n_ptr))
-               tipc_link_failover_send_queue(l_ptr);
-       else
-               node_lost_contact(n_ptr);
-
-       /* Leave room for changeover header when returning 'mtu' to users: */
-       if (active[0]) {
-               n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
-               n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
+bool tipc_node_is_up(struct tipc_node *n)
+{
+       return n->active_links[0] != INVALID_BEARER_ID;
+}
+
+void tipc_node_check_dest(struct net *net, u32 onode,
+                         struct tipc_bearer *b,
+                         u16 capabilities, u32 signature,
+                         struct tipc_media_addr *maddr,
+                         bool *respond, bool *dupl_addr)
+{
+       struct tipc_node *n;
+       struct tipc_link *l;
+       struct tipc_link_entry *le;
+       bool addr_match = false;
+       bool sign_match = false;
+       bool link_up = false;
+       bool accept_addr = false;
+       bool reset = true;
+
+       *dupl_addr = false;
+       *respond = false;
+
+       n = tipc_node_create(net, onode, capabilities);
+       if (!n)
                return;
+
+       tipc_node_lock(n);
+
+       le = &n->links[b->identity];
+
+       /* Prepare to validate requesting node's signature and media address */
+       l = le->link;
+       link_up = l && tipc_link_is_up(l);
+       addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
+       sign_match = (signature == n->signature);
+
+       /* These three flags give us eight permutations: */
+
+       if (sign_match && addr_match && link_up) {
+               /* All is fine. Do nothing. */
+               reset = false;
+       } else if (sign_match && addr_match && !link_up) {
+               /* Respond. The link will come up in due time */
+               *respond = true;
+       } else if (sign_match && !addr_match && link_up) {
+               /* Peer has changed i/f address without rebooting.
+                * If so, the link will reset soon, and the next
+                * discovery will be accepted. So we can ignore it.
+                * It may also be a cloned or malicious peer having
+                * chosen the same node address and signature as an
+                * existing one.
+                * Ignore requests until the link goes down, if ever.
+                */
+               *dupl_addr = true;
+       } else if (sign_match && !addr_match && !link_up) {
+               /* Peer link has changed i/f address without rebooting.
+                * It may also be a cloned or malicious peer; we can't
+                * distinguish between the two.
+                * The signature is correct, so we must accept.
+                */
+               accept_addr = true;
+               *respond = true;
+       } else if (!sign_match && addr_match && link_up) {
+               /* Peer node rebooted. Two possibilities:
+                *  - Delayed re-discovery; this link endpoint has already
+                *    reset and re-established contact with the peer, before
+                *    receiving a discovery message from that node.
+                *    (The peer happened to receive one from this node first).
+                *  - The peer came back so fast that our side has not
+                *    discovered it yet. Probing from this side will soon
+                *    reset the link, since there can be no working link
+                *    endpoint at the peer end, and the link will re-establish.
+                *  Accept the signature, since it comes from a known peer.
+                */
+               n->signature = signature;
+       } else if (!sign_match && addr_match && !link_up) {
+               /*  The peer node has rebooted.
+                *  Accept signature, since it is a known peer.
+                */
+               n->signature = signature;
+               *respond = true;
+       } else if (!sign_match && !addr_match && link_up) {
+               /* Peer rebooted with new address, or a new/duplicate peer.
+                * Ignore until the link goes down, if ever.
+                */
+               *dupl_addr = true;
+       } else if (!sign_match && !addr_match && !link_up) {
+               /* Peer rebooted with new address, or it is a new peer.
+                * Accept signature and address.
+                */
+               n->signature = signature;
+               accept_addr = true;
+               *respond = true;
        }
-       /* Loopback link went down? No fragmentation needed from now on. */
-       if (n_ptr->addr == tn->own_addr) {
-               n_ptr->act_mtus[0] = MAX_MSG_SIZE;
-               n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+
+       if (!accept_addr)
+               goto exit;
+
+       /* Now create new link if not already existing */
+       if (!l) {
+               if (n->link_cnt == 2) {
+                       pr_warn("Cannot establish 3rd link to %x\n", n->addr);
+                       goto exit;
+               }
+               if (!tipc_link_create(n, b, mod(tipc_net(net)->random),
+                                     tipc_own_addr(net), onode, &le->maddr,
+                                     &le->inputq, &n->bclink.namedq, &l)) {
+                       *respond = false;
+                       goto exit;
+               }
+               tipc_link_reset(l);
+               le->link = l;
+               n->link_cnt++;
+               tipc_node_calculate_timer(n, l);
+               if (n->link_cnt == 1)
+                       if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
+                               tipc_node_get(n);
        }
+       memcpy(&le->maddr, maddr, sizeof(*maddr));
+exit:
+       tipc_node_unlock(n);
+       if (reset)
+               tipc_node_link_down(n, b->identity, false);
+       tipc_node_put(n);
 }
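/*
 * Editorial summary of the eight (sign_match, addr_match, link_up)
 * permutations handled in tipc_node_check_dest() above:
 *
 *   sign addr link   action
 *    1    1    1     all fine, do nothing (reset = false)
 *    1    1    0     respond; link will come up in due time
 *    1    0    1     ignore, flag duplicate address
 *    1    0    0     accept new address, respond
 *    0    1    1     accept new signature (peer rebooted)
 *    0    1    0     accept new signature, respond
 *    0    0    1     ignore, flag duplicate address
 *    0    0    0     accept signature and address, respond
 */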
 
-int tipc_node_active_links(struct tipc_node *n_ptr)
+void tipc_node_delete_links(struct net *net, int bearer_id)
 {
-       return n_ptr->active_links[0] != NULL;
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_node *n;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(n, &tn->node_list, list) {
+               tipc_node_link_down(n, bearer_id, true);
+       }
+       rcu_read_unlock();
 }
 
-int tipc_node_is_up(struct tipc_node *n_ptr)
+static void tipc_node_reset_links(struct tipc_node *n)
 {
-       return tipc_node_active_links(n_ptr);
+       char addr_string[16];
+       int i;
+
+       pr_warn("Resetting all links to %s\n",
+               tipc_addr_string_fill(addr_string, n->addr));
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               tipc_node_link_down(n, i, false);
+       }
 }
 
-void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+/* tipc_node_fsm_evt - node finite state machine
+ * Determines when contact is allowed with peer node
+ */
+static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
 {
-       n_ptr->links[l_ptr->bearer_id] = l_ptr;
-       n_ptr->link_cnt++;
+       int state = n->state;
+
+       switch (state) {
+       case SELF_DOWN_PEER_DOWN:
+               switch (evt) {
+               case SELF_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_COMING;
+                       break;
+               case PEER_ESTABL_CONTACT_EVT:
+                       state = SELF_COMING_PEER_UP;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_UP_PEER_UP:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+                       state = NODE_SYNCHING;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+                       state = NODE_FAILINGOVER;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case NODE_SYNCH_END_EVT:
+               case NODE_FAILOVER_END_EVT:
+                       break;
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_DOWN_PEER_LEAVING:
+               switch (evt) {
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_DOWN;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case SELF_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_UP_PEER_COMING:
+               switch (evt) {
+               case PEER_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_COMING_PEER_UP:
+               switch (evt) {
+               case SELF_ESTABL_CONTACT_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case SELF_LOST_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case SELF_LEAVING_PEER_DOWN:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_DOWN;
+                       break;
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+               case PEER_LOST_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_END_EVT:
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_FAILOVER_BEGIN_EVT:
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case NODE_FAILINGOVER:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_FAILOVER_END_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+               case NODE_SYNCH_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       case NODE_SYNCHING:
+               switch (evt) {
+               case SELF_LOST_CONTACT_EVT:
+                       state = SELF_DOWN_PEER_LEAVING;
+                       break;
+               case PEER_LOST_CONTACT_EVT:
+                       state = SELF_LEAVING_PEER_DOWN;
+                       break;
+               case NODE_SYNCH_END_EVT:
+                       state = SELF_UP_PEER_UP;
+                       break;
+               case NODE_FAILOVER_BEGIN_EVT:
+                       state = NODE_FAILINGOVER;
+                       break;
+               case NODE_SYNCH_BEGIN_EVT:
+               case SELF_ESTABL_CONTACT_EVT:
+               case PEER_ESTABL_CONTACT_EVT:
+                       break;
+               case NODE_FAILOVER_END_EVT:
+               default:
+                       goto illegal_evt;
+               }
+               break;
+       default:
+               pr_err("Unknown node fsm state %x\n", state);
+               break;
+       }
+       n->state = state;
+       return;
+
+illegal_evt:
+       pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
 }
 
-void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
+bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
 {
-       int i;
+       int state = n->state;
 
-       for (i = 0; i < MAX_BEARERS; i++) {
-               if (l_ptr != n_ptr->links[i])
-                       continue;
-               n_ptr->links[i] = NULL;
-               n_ptr->link_cnt--;
+       if (likely(state == SELF_UP_PEER_UP))
+               return true;
+
+       if (state == SELF_LEAVING_PEER_DOWN)
+               return false;
+
+       if (state == SELF_DOWN_PEER_LEAVING) {
+               if (msg_peer_node_is_up(hdr))
+                       return false;
        }
+
+       return true;
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
 {
+       tipc_node_fsm_evt(n_ptr, SELF_ESTABL_CONTACT_EVT);
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
        n_ptr->bclink.oos_state = 0;
        n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
        tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
 }
 
-static void node_lost_contact(struct tipc_node *n_ptr)
+static void node_lost_contact(struct tipc_node *n_ptr,
+                             struct sk_buff_head *inputq)
 {
        char addr_string[16];
        struct tipc_sock_conn *conn, *safe;
+       struct tipc_link *l;
        struct list_head *conns = &n_ptr->conn_sks;
        struct sk_buff *skb;
        struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
@@ -396,21 +840,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
        /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
-               struct tipc_link *l_ptr = n_ptr->links[i];
-               if (!l_ptr)
-                       continue;
-               l_ptr->flags &= ~LINK_FAILINGOVER;
-               l_ptr->failover_checkpt = 0;
-               l_ptr->failover_pkts = 0;
-               kfree_skb(l_ptr->failover_skb);
-               l_ptr->failover_skb = NULL;
-               tipc_link_reset_fragments(l_ptr);
+               l = n_ptr->links[i].link;
+               if (l)
+                       tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
        }
 
-       n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
-
        /* Prevent re-contact with node until cleanup is done */
-       n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
+       tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
 
        /* Notify publications from this node */
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
@@ -421,10 +857,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                                      SHORT_H_SIZE, 0, tn->own_addr,
                                      conn->peer_node, conn->port,
                                      conn->peer_port, TIPC_ERR_NO_NODE);
-               if (likely(skb)) {
-                       skb_queue_tail(n_ptr->inputq, skb);
-                       n_ptr->action_flags |= TIPC_MSG_EVT;
-               }
+               if (likely(skb))
+                       skb_queue_tail(inputq, skb);
                list_del(&conn->list);
                kfree(conn);
        }
@@ -453,7 +887,7 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                goto exit;
 
        tipc_node_lock(node);
-       link = node->links[bearer_id];
+       link = node->links[bearer_id].link;
        if (link) {
                strncpy(linkname, link->name, len);
                err = 0;
@@ -471,27 +905,20 @@ void tipc_node_unlock(struct tipc_node *node)
        u32 flags = node->action_flags;
        u32 link_id = 0;
        struct list_head *publ_list;
-       struct sk_buff_head *inputq = node->inputq;
-       struct sk_buff_head *namedq;
 
-       if (likely(!flags || (flags == TIPC_MSG_EVT))) {
-               node->action_flags = 0;
+       if (likely(!flags)) {
                spin_unlock_bh(&node->lock);
-               if (flags == TIPC_MSG_EVT)
-                       tipc_sk_rcv(net, inputq);
                return;
        }
 
        addr = node->addr;
        link_id = node->link_id;
-       namedq = node->namedq;
        publ_list = &node->publ_list;
 
-       node->action_flags &= ~(TIPC_MSG_EVT |
-                               TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+       node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                                TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
                                TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-                               TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+                               TIPC_BCAST_RESET);
 
        spin_unlock_bh(&node->lock);
 
@@ -512,17 +939,11 @@ void tipc_node_unlock(struct tipc_node *node)
                tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
                                      link_id, addr);
 
-       if (flags & TIPC_MSG_EVT)
-               tipc_sk_rcv(net, inputq);
-
-       if (flags & TIPC_NAMED_MSG_EVT)
-               tipc_named_rcv(net, namedq);
-
        if (flags & TIPC_BCAST_MSG_EVT)
                tipc_bclink_input(net);
 
        if (flags & TIPC_BCAST_RESET)
-               tipc_link_reset_all(node);
+               tipc_node_reset_links(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -559,6 +980,279 @@ msg_full:
        return -EMSGSIZE;
 }
 
+static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
+                                              int *bearer_id,
+                                              struct tipc_media_addr **maddr)
+{
+       int id = n->active_links[sel & 1];
+
+       if (unlikely(id < 0))
+               return NULL;
+
+       *bearer_id = id;
+       *maddr = &n->links[id].maddr;
+       return n->links[id].link;
+}
+
+/**
+ * tipc_node_xmit() is the general link level function for message sending
+ * @net: the applicable net namespace
+ * @list: chain of buffers containing message
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ */
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
+                  u32 dnode, int selector)
+{
+       struct tipc_link *l = NULL;
+       struct tipc_node *n;
+       struct sk_buff_head xmitq;
+       struct tipc_media_addr *maddr;
+       int bearer_id;
+       int rc = -EHOSTUNREACH;
+
+       __skb_queue_head_init(&xmitq);
+       n = tipc_node_find(net, dnode);
+       if (likely(n)) {
+               tipc_node_lock(n);
+               l = tipc_node_select_link(n, selector, &bearer_id, &maddr);
+               if (likely(l))
+                       rc = tipc_link_xmit(l, list, &xmitq);
+               tipc_node_unlock(n);
+               if (unlikely(rc == -ENOBUFS))
+                       tipc_node_link_down(n, bearer_id, false);
+               tipc_node_put(n);
+       }
+       if (likely(!rc)) {
+               tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
+               return 0;
+       }
+       if (likely(in_own_node(net, dnode))) {
+               tipc_sk_rcv(net, list);
+               return 0;
+       }
+       return rc;
+}
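/*
 * Editorial sketch of the caller contract documented above: the chain is
 * consumed on success and on most errors, but left intact on -ELINKCONG
 * so the caller can block and retry (compare the socket code changes
 * further down). The function name is hypothetical.
 */
static int example_xmit_retry(struct net *net, struct sk_buff_head *chain,
                              u32 dnode, u32 portid)
{
        int rc, retries = 3;

        while (retries--) {
                rc = tipc_node_xmit(net, chain, dnode, portid);
                if (rc != -ELINKCONG)
                        break;
                /* -ELINKCONG: chain untouched; wait out congestion, retry */
        }
        return rc;
}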
+
+/* tipc_node_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
+                      u32 selector)
+{
+       struct sk_buff_head head;
+       int rc;
+
+       skb_queue_head_init(&head);
+       __skb_queue_tail(&head, skb);
+       rc = tipc_node_xmit(net, &head, dnode, selector);
+       if (rc == -ELINKCONG)
+               kfree_skb(skb);
+       return 0;
+}
+
+/**
+ * tipc_node_check_state - check and if necessary update node state
+ * @skb: TIPC packet
+ * @bearer_id: identity of bearer delivering the packet
+ * Returns true if state is ok, otherwise consumes buffer and returns false
+ */
+static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
+                                 int bearer_id, struct sk_buff_head *xmitq)
+{
+       struct tipc_msg *hdr = buf_msg(skb);
+       int usr = msg_user(hdr);
+       int mtyp = msg_type(hdr);
+       u16 oseqno = msg_seqno(hdr);
+       u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
+       u16 exp_pkts = msg_msgcnt(hdr);
+       u16 rcv_nxt, syncpt, dlv_nxt;
+       int state = n->state;
+       struct tipc_link *l, *pl = NULL;
+       struct tipc_media_addr *maddr;
+       int i, pb_id;
+
+       l = n->links[bearer_id].link;
+       if (!l)
+               return false;
+       rcv_nxt = l->rcv_nxt;
+
+
+       if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
+               return true;
+
+       /* Find parallel link, if any */
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if ((i != bearer_id) && n->links[i].link) {
+                       pl = n->links[i].link;
+                       break;
+               }
+       }
+
+       /* Update node accessibility if applicable */
+       if (state == SELF_UP_PEER_COMING) {
+               if (!tipc_link_is_up(l))
+                       return true;
+               if (!msg_peer_link_is_up(hdr))
+                       return true;
+               tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
+       }
+
+       if (state == SELF_DOWN_PEER_LEAVING) {
+               if (msg_peer_node_is_up(hdr))
+                       return false;
+               tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
+       }
+
+       /* Ignore duplicate packets */
+       if (less(oseqno, rcv_nxt))
+               return true;
+
+       /* Initiate or update failover mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
+               syncpt = oseqno + exp_pkts - 1;
+               if (pl && tipc_link_is_up(pl)) {
+                       pb_id = pl->bearer_id;
+                       __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+                       tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq);
+               }
+               /* If pkts arrive out of order, use lowest calculated syncpt */
+               if (less(syncpt, n->sync_point))
+                       n->sync_point = syncpt;
+       }
+
+       /* Open parallel link when tunnel link reaches synch point */
+       if ((n->state == NODE_FAILINGOVER) && !tipc_link_is_failingover(l)) {
+               if (!more(rcv_nxt, n->sync_point))
+                       return true;
+               tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
+               if (pl)
+                       tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
+               return true;
+       }
+
+       /* Initiate or update synch mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+               syncpt = iseqno + exp_pkts - 1;
+               if (!tipc_link_is_up(l)) {
+                       tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);
+                       __tipc_node_link_up(n, bearer_id, xmitq);
+               }
+               if (n->state == SELF_UP_PEER_UP) {
+                       n->sync_point = syncpt;
+                       tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
+                       tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
+               }
+               if (less(syncpt, n->sync_point))
+                       n->sync_point = syncpt;
+       }
+
+       /* Open tunnel link when parallel link reaches synch point */
+       if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) {
+               if (pl)
+                       dlv_nxt = mod(pl->rcv_nxt - skb_queue_len(pl->inputq));
+               if (!pl || more(dlv_nxt, n->sync_point)) {
+                       tipc_link_fsm_evt(l, LINK_SYNCH_END_EVT);
+                       tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
+                       return true;
+               }
+               if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
+                       return true;
+               if (usr == LINK_PROTOCOL)
+                       return true;
+               return false;
+       }
+       return true;
+}
+
+/**
+ * tipc_rcv - process TIPC packets/messages arriving from off-node
+ * @net: the applicable net namespace
+ * @skb: TIPC packet
+ * @b: pointer to the bearer the message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
+{
+       struct sk_buff_head xmitq;
+       struct tipc_node *n;
+       struct tipc_msg *hdr = buf_msg(skb);
+       int usr = msg_user(hdr);
+       int bearer_id = b->identity;
+       struct tipc_link_entry *le;
+       int rc = 0;
+
+       __skb_queue_head_init(&xmitq);
+
+       /* Ensure message is well-formed */
+       if (unlikely(!tipc_msg_validate(skb)))
+               goto discard;
+
+       /* Handle arrival of a non-unicast link packet */
+       if (unlikely(msg_non_seq(hdr))) {
+               if (usr ==  LINK_CONFIG)
+                       tipc_disc_rcv(net, skb, b);
+               else
+                       tipc_bclink_rcv(net, skb);
+               return;
+       }
+
+       /* Locate neighboring node that sent packet */
+       n = tipc_node_find(net, msg_prevnode(hdr));
+       if (unlikely(!n))
+               goto discard;
+       le = &n->links[bearer_id];
+
+       tipc_node_lock(n);
+
+       /* Is reception permitted at the moment? */
+       if (!tipc_node_filter_pkt(n, hdr))
+               goto unlock;
+
+       if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
+               tipc_bclink_sync_state(n, hdr);
+
+       /* Release acked broadcast packets */
+       if (unlikely(n->bclink.acked != msg_bcast_ack(hdr)))
+               tipc_bclink_acknowledge(n, msg_bcast_ack(hdr));
+
+       /* Check and if necessary update node state */
+       if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) {
+               rc = tipc_link_rcv(le->link, skb, &xmitq);
+               skb = NULL;
+       }
+unlock:
+       tipc_node_unlock(n);
+
+       if (unlikely(rc & TIPC_LINK_UP_EVT))
+               tipc_node_link_up(n, bearer_id, &xmitq);
+
+       if (unlikely(rc & TIPC_LINK_DOWN_EVT))
+               tipc_node_link_down(n, bearer_id, false);
+
+       if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+               tipc_named_rcv(net, &n->bclink.namedq);
+
+       if (!skb_queue_empty(&le->inputq))
+               tipc_sk_rcv(net, &le->inputq);
+
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
+
+       tipc_node_put(n);
+discard:
+       kfree_skb(skb);
+}
+
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
        int err;
index 5a834cf142c8432fc4b5d65ab9ada044b342f438..344b3e7594fd0d59d8b83143181ffeac8c2df9a9 100644 (file)
 /* Out-of-range value for node signature */
 #define INVALID_NODE_SIG       0x10000
 
+#define INVALID_BEARER_ID -1
+
 /* Flags used to take different actions according to flag type
- * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
- * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
  * TIPC_NOTIFY_NODE_DOWN: notify node is down
  * TIPC_NOTIFY_NODE_UP: notify node is up
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
-       TIPC_MSG_EVT                    = 1,
-       TIPC_WAIT_PEER_LINKS_DOWN       = (1 << 1),
-       TIPC_WAIT_OWN_LINKS_DOWN        = (1 << 2),
        TIPC_NOTIFY_NODE_DOWN           = (1 << 3),
        TIPC_NOTIFY_NODE_UP             = (1 << 4),
        TIPC_WAKEUP_BCAST_USERS         = (1 << 5),
        TIPC_NOTIFY_LINK_UP             = (1 << 6),
        TIPC_NOTIFY_LINK_DOWN           = (1 << 7),
-       TIPC_NAMED_MSG_EVT              = (1 << 8),
        TIPC_BCAST_MSG_EVT              = (1 << 9),
        TIPC_BCAST_RESET                = (1 << 10)
 };
@@ -85,10 +81,17 @@ struct tipc_node_bclink {
        u32 deferred_size;
        struct sk_buff_head deferdq;
        struct sk_buff *reasm_buf;
-       int inputq_map;
+       struct sk_buff_head namedq;
        bool recv_permitted;
 };
 
+struct tipc_link_entry {
+       struct tipc_link *link;
+       u32 mtu;
+       struct sk_buff_head inputq;
+       struct tipc_media_addr maddr;
+};
+
 /**
  * struct tipc_node - TIPC node structure
  * @addr: network address of node
@@ -98,11 +101,12 @@ struct tipc_node_bclink {
  * @hash: links to adjacent nodes in unsorted hash chain
  * @inputq: pointer to input queue containing messages for msg event
  * @namedq: pointer to name table input queue with name table messages
- * @curr_link: the link holding the node lock, if any
- * @active_links: pointers to active links to node
- * @links: pointers to all links to node
+ * @active_links: bearer ids of active links, used as index into links[] array
+ * @links: array containing references to all links to node
  * @action_flags: bit mask of different types of node actions
  * @bclink: broadcast-related info
+ * @state: connectivity state vs peer node
+ * @sync_point: sequence number where synch/failover is finished
  * @list: links to adjacent nodes in sorted list of cluster's nodes
  * @working_links: number of working links to node (both active and standby)
  * @link_cnt: number of links to node
@@ -118,14 +122,13 @@ struct tipc_node {
        spinlock_t lock;
        struct net *net;
        struct hlist_node hash;
-       struct sk_buff_head *inputq;
-       struct sk_buff_head *namedq;
-       struct tipc_link *active_links[2];
-       u32 act_mtus[2];
-       struct tipc_link *links[MAX_BEARERS];
+       int active_links[2];
+       struct tipc_link_entry links[MAX_BEARERS];
        int action_flags;
        struct tipc_node_bclink bclink;
        struct list_head list;
+       int state;
+       u16 sync_point;
        int link_cnt;
        u16 working_links;
        u16 capabilities;
@@ -133,25 +136,32 @@ struct tipc_node {
        u32 link_id;
        struct list_head publ_list;
        struct list_head conn_sks;
+       unsigned long keepalive_intv;
+       struct timer_list timer;
        struct rcu_head rcu;
 };
 
 struct tipc_node *tipc_node_find(struct net *net, u32 addr);
 void tipc_node_put(struct tipc_node *node);
-struct tipc_node *tipc_node_create(struct net *net, u32 addr);
 void tipc_node_stop(struct net *net);
+void tipc_node_check_dest(struct net *net, u32 onode,
+                         struct tipc_bearer *bearer,
+                         u16 capabilities, u32 signature,
+                         struct tipc_media_addr *maddr,
+                         bool *respond, bool *dupl_addr);
+void tipc_node_delete_links(struct net *net, int bearer_id);
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
-int tipc_node_active_links(struct tipc_node *n_ptr);
-int tipc_node_is_up(struct tipc_node *n_ptr);
+bool tipc_node_is_up(struct tipc_node *n);
 int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
                           char *linkname, size_t len);
 void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
+                  int selector);
+int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
+                      u32 selector);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
-
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 static inline void tipc_node_lock(struct tipc_node *node)
@@ -159,26 +169,30 @@ static inline void tipc_node_lock(struct tipc_node *node)
        spin_lock_bh(&node->lock);
 }
 
-static inline bool tipc_node_blocked(struct tipc_node *node)
+static inline struct tipc_link *node_active_link(struct tipc_node *n, int sel)
 {
-       return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
-               TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
+       int bearer_id = n->active_links[sel & 1];
+
+       if (unlikely(bearer_id == INVALID_BEARER_ID))
+               return NULL;
+
+       return n->links[bearer_id].link;
 }
 
-static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
+static inline unsigned int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
 {
-       struct tipc_node *node;
-       u32 mtu;
-
-       node = tipc_node_find(net, addr);
+       struct tipc_node *n;
+       int bearer_id;
+       unsigned int mtu = MAX_MSG_SIZE;
 
-       if (likely(node)) {
-               mtu = node->act_mtus[selector & 1];
-               tipc_node_put(node);
-       } else {
-               mtu = MAX_MSG_SIZE;
-       }
+       n = tipc_node_find(net, addr);
+       if (unlikely(!n))
+               return mtu;
 
+       bearer_id = n->active_links[sel & 1];
+       if (likely(bearer_id != INVALID_BEARER_ID))
+               mtu = n->links[bearer_id].mtu;
+       tipc_node_put(n);
        return mtu;
 }
 
index 3a7567f690f35458f0fe58e0cc1254a4ec8033fa..1060d52ff23eb14f7b2ed2f732beacf9a698502c 100644 (file)
@@ -248,6 +248,22 @@ static void tsk_advance_rx_queue(struct sock *sk)
        kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
 }
 
+/* tipc_sk_respond() : send response message back to sender
+ */
+static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
+{
+       u32 selector;
+       u32 dnode;
+       u32 onode = tipc_own_addr(sock_net(sk));
+
+       if (!tipc_msg_reverse(onode, &skb, err))
+               return;
+
+       dnode = msg_destnode(buf_msg(skb));
+       selector = msg_origport(buf_msg(skb));
+       tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
+}
+
 /**
  * tsk_rej_rx_queue - reject all buffers in socket receive queue
  *
@@ -256,13 +272,9 @@ static void tsk_advance_rx_queue(struct sock *sk)
 static void tsk_rej_rx_queue(struct sock *sk)
 {
        struct sk_buff *skb;
-       u32 dnode;
-       u32 own_node = tsk_own_node(tipc_sk(sk));
 
-       while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
-               if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
-                       tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
-       }
+       while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
+               tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
 }
 
 /* tsk_peer_msg - verify if message was sent by connected port's peer
@@ -441,9 +453,7 @@ static int tipc_release(struct socket *sock)
                                tsk->connected = 0;
                                tipc_node_remove_conn(net, dnode, tsk->portid);
                        }
-                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-                                            TIPC_ERR_NO_PORT))
-                               tipc_link_xmit_skb(net, skb, dnode, 0);
+                       tipc_sk_respond(sk, skb, TIPC_ERR_NO_PORT);
                }
        }
 
@@ -456,7 +466,7 @@ static int tipc_release(struct socket *sock)
                                      tsk_own_node(tsk), tsk_peer_port(tsk),
                                      tsk->portid, TIPC_ERR_NO_PORT);
                if (skb)
-                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+                       tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                tipc_node_remove_conn(net, dnode, tsk->portid);
        }
 
@@ -686,21 +696,22 @@ new_mtu:
 
        do {
                rc = tipc_bclink_xmit(net, pktchain);
-               if (likely(rc >= 0)) {
-                       rc = dsz;
-                       break;
+               if (likely(!rc))
+                       return dsz;
+
+               if (rc == -ELINKCONG) {
+                       tsk->link_cong = 1;
+                       rc = tipc_wait_for_sndmsg(sock, &timeo);
+                       if (!rc)
+                               continue;
                }
+               __skb_queue_purge(pktchain);
                if (rc == -EMSGSIZE) {
                        msg->msg_iter = save;
                        goto new_mtu;
                }
-               if (rc != -ELINKCONG)
-                       break;
-               tipc_sk(sk)->link_cong = 1;
-               rc = tipc_wait_for_sndmsg(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
-       } while (!rc);
+               break;
+       } while (1);
        return rc;
 }
 
@@ -763,35 +774,35 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
 /**
  * tipc_sk_proto_rcv - receive a connection mng protocol message
  * @tsk: receiving socket
- * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
+ * @skb: pointer to message buffer.
  */
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
 {
-       struct tipc_msg *msg = buf_msg(*skb);
+       struct sock *sk = &tsk->sk;
+       struct tipc_msg *hdr = buf_msg(skb);
+       int mtyp = msg_type(hdr);
        int conn_cong;
-       u32 dnode;
-       u32 own_node = tsk_own_node(tsk);
+
        /* Ignore if connection cannot be validated: */
-       if (!tsk_peer_msg(tsk, msg))
+       if (!tsk_peer_msg(tsk, hdr))
                goto exit;
 
        tsk->probing_state = TIPC_CONN_OK;
 
-       if (msg_type(msg) == CONN_ACK) {
+       if (mtyp == CONN_PROBE) {
+               msg_set_type(hdr, CONN_PROBE_REPLY);
+               tipc_sk_respond(sk, skb, TIPC_OK);
+               return;
+       } else if (mtyp == CONN_ACK) {
                conn_cong = tsk_conn_cong(tsk);
-               tsk->sent_unacked -= msg_msgcnt(msg);
+               tsk->sent_unacked -= msg_msgcnt(hdr);
                if (conn_cong)
-                       tsk->sk.sk_write_space(&tsk->sk);
-       } else if (msg_type(msg) == CONN_PROBE) {
-               if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
-                       msg_set_type(msg, CONN_PROBE_REPLY);
-                       return;
-               }
+                       sk->sk_write_space(sk);
+       } else if (mtyp != CONN_PROBE_REPLY) {
+               pr_warn("Received unknown CONN_PROTO msg\n");
        }
-       /* Do nothing if msg_type() == CONN_PROBE_REPLY */
 exit:
-       kfree_skb(*skb);
-       *skb = NULL;
+       kfree_skb(skb);
 }
 
 static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -924,24 +935,25 @@ new_mtu:
        do {
                skb = skb_peek(pktchain);
                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
-               if (likely(rc >= 0)) {
+               rc = tipc_node_xmit(net, pktchain, dnode, tsk->portid);
+               if (likely(!rc)) {
                        if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
-                       rc = dsz;
-                       break;
+                       return dsz;
                }
+               if (rc == -ELINKCONG) {
+                       tsk->link_cong = 1;
+                       rc = tipc_wait_for_sndmsg(sock, &timeo);
+                       if (!rc)
+                               continue;
+               }
+               __skb_queue_purge(pktchain);
                if (rc == -EMSGSIZE) {
                        m->msg_iter = save;
                        goto new_mtu;
                }
-               if (rc != -ELINKCONG)
-                       break;
-               tsk->link_cong = 1;
-               rc = tipc_wait_for_sndmsg(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
-       } while (!rc);
+               break;
+       } while (1);
 
        return rc;
 }
@@ -1043,15 +1055,16 @@ next:
                return rc;
        do {
                if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_link_xmit(net, pktchain, dnode, portid);
+                       rc = tipc_node_xmit(net, pktchain, dnode, portid);
                        if (likely(!rc)) {
                                tsk->sent_unacked++;
                                sent += send;
                                if (sent == dsz)
-                                       break;
+                                       return dsz;
                                goto next;
                        }
                        if (rc == -EMSGSIZE) {
+                               __skb_queue_purge(pktchain);
                                tsk->max_pkt = tipc_node_get_mtu(net, dnode,
                                                                 portid);
                                m->msg_iter = save;
@@ -1059,13 +1072,13 @@ next:
                        }
                        if (rc != -ELINKCONG)
                                break;
+
                        tsk->link_cong = 1;
                }
                rc = tipc_wait_for_sndpkt(sock, &timeo);
-               if (rc)
-                       __skb_queue_purge(pktchain);
        } while (!rc);
 
+       __skb_queue_purge(pktchain);
        return sent ? sent : rc;
 }
 
@@ -1221,7 +1234,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
                return;
        msg = buf_msg(skb);
        msg_set_msgcnt(msg, ack);
-       tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
+       tipc_node_xmit_skb(net, skb, dnode, msg_link_selector(msg));
 }
 
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1507,82 +1520,81 @@ static void tipc_data_ready(struct sock *sk)
  * @tsk: TIPC socket
 * @skb: pointer to message buffer
  *
- * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
+ * Returns true if everything ok, false otherwise
  */
-static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
+static bool filter_connect(struct tipc_sock *tsk, struct sk_buff *skb)
 {
        struct sock *sk = &tsk->sk;
        struct net *net = sock_net(sk);
        struct socket *sock = sk->sk_socket;
-       struct tipc_msg *msg = buf_msg(*skb);
-       int retval = -TIPC_ERR_NO_PORT;
+       struct tipc_msg *hdr = buf_msg(skb);
 
-       if (msg_mcast(msg))
-               return retval;
+       if (unlikely(msg_mcast(hdr)))
+               return false;
 
        switch ((int)sock->state) {
        case SS_CONNECTED:
+
                /* Accept only connection-based messages sent by peer */
-               if (tsk_peer_msg(tsk, msg)) {
-                       if (unlikely(msg_errcode(msg))) {
-                               sock->state = SS_DISCONNECTING;
-                               tsk->connected = 0;
-                               /* let timer expire on it's own */
-                               tipc_node_remove_conn(net, tsk_peer_node(tsk),
-                                                     tsk->portid);
-                       }
-                       retval = TIPC_OK;
+               if (unlikely(!tsk_peer_msg(tsk, hdr)))
+                       return false;
+
+               if (unlikely(msg_errcode(hdr))) {
+                       sock->state = SS_DISCONNECTING;
+                       tsk->connected = 0;
+                       /* Let timer expire on its own */
+                       tipc_node_remove_conn(net, tsk_peer_node(tsk),
+                                             tsk->portid);
                }
-               break;
+               return true;
+
        case SS_CONNECTING:
-               /* Accept only ACK or NACK message */
 
-               if (unlikely(!msg_connected(msg)))
-                       break;
+               /* Accept only ACK or NACK message */
+               if (unlikely(!msg_connected(hdr)))
+                       return false;
 
-               if (unlikely(msg_errcode(msg))) {
+               if (unlikely(msg_errcode(hdr))) {
                        sock->state = SS_DISCONNECTING;
                        sk->sk_err = ECONNREFUSED;
-                       retval = TIPC_OK;
-                       break;
+                       return true;
                }
 
-               if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
+               if (unlikely(!msg_isdata(hdr))) {
                        sock->state = SS_DISCONNECTING;
                        sk->sk_err = EINVAL;
-                       retval = TIPC_OK;
-                       break;
+                       return true;
                }
 
-               tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
-               msg_set_importance(&tsk->phdr, msg_importance(msg));
+               tipc_sk_finish_conn(tsk, msg_origport(hdr), msg_orignode(hdr));
+               msg_set_importance(&tsk->phdr, msg_importance(hdr));
                sock->state = SS_CONNECTED;
 
-               /* If an incoming message is an 'ACK-', it should be
-                * discarded here because it doesn't contain useful
-                * data. In addition, we should try to wake up
-                * connect() routine if sleeping.
-                */
-               if (msg_data_sz(msg) == 0) {
-                       kfree_skb(*skb);
-                       *skb = NULL;
-                       if (waitqueue_active(sk_sleep(sk)))
-                               wake_up_interruptible(sk_sleep(sk));
-               }
-               retval = TIPC_OK;
-               break;
+               /* If 'ACK+' message, add to socket receive queue */
+               if (msg_data_sz(hdr))
+                       return true;
+
+               /* If empty 'ACK-' message, wake up sleeping connect() */
+               if (waitqueue_active(sk_sleep(sk)))
+                       wake_up_interruptible(sk_sleep(sk));
+
+               /* 'ACK-' message is neither accepted nor rejected: */
+               msg_set_dest_droppable(hdr, 1);
+               return false;
+
        case SS_LISTENING:
        case SS_UNCONNECTED:
+
                /* Accept only SYN message */
-               if (!msg_connected(msg) && !(msg_errcode(msg)))
-                       retval = TIPC_OK;
+               if (!msg_connected(hdr) && !(msg_errcode(hdr)))
+                       return true;
                break;
        case SS_DISCONNECTING:
                break;
        default:
                pr_err("Unknown socket state %u\n", sock->state);
        }
-       return retval;
+       return false;
 }
 
 /**
@@ -1617,61 +1629,70 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
 /**
  * filter_rcv - validate incoming message
  * @sk: socket
- * @skb: pointer to message. Set to NULL if buffer is consumed.
+ * @skb: pointer to message.
  *
  * Enqueues message on receive queue if acceptable; optionally handles
  * disconnect indication for a connected socket.
  *
  * Called with socket lock already taken
  *
- * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
+ * Returns true if message was added to socket receive queue, otherwise false
  */
-static int filter_rcv(struct sock *sk, struct sk_buff **skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_msg *msg = buf_msg(*skb);
-       unsigned int limit = rcvbuf_limit(sk, *skb);
-       int rc = TIPC_OK;
+       struct tipc_msg *hdr = buf_msg(skb);
+       unsigned int limit = rcvbuf_limit(sk, skb);
+       int err = TIPC_OK;
+       int usr = msg_user(hdr);
 
-       if (unlikely(msg_user(msg) == CONN_MANAGER)) {
+       if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
                tipc_sk_proto_rcv(tsk, skb);
-               return TIPC_OK;
+               return false;
        }
 
-       if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
-               kfree_skb(*skb);
+       if (unlikely(usr == SOCK_WAKEUP)) {
+               kfree_skb(skb);
                tsk->link_cong = 0;
                sk->sk_write_space(sk);
-               *skb = NULL;
-               return TIPC_OK;
+               return false;
        }
 
-       /* Reject message if it is wrong sort of message for socket */
-       if (msg_type(msg) > TIPC_DIRECT_MSG)
-               return -TIPC_ERR_NO_PORT;
+       /* Drop if illegal message type */
+       if (unlikely(msg_type(hdr) > TIPC_DIRECT_MSG)) {
+               kfree_skb(skb);
+               return false;
+       }
 
-       if (sock->state == SS_READY) {
-               if (msg_connected(msg))
-                       return -TIPC_ERR_NO_PORT;
-       } else {
-               rc = filter_connect(tsk, skb);
-               if (rc != TIPC_OK || !*skb)
-                       return rc;
+       /* Reject if wrong message type for current socket state */
+       if (unlikely(sock->state == SS_READY)) {
+               if (msg_connected(hdr)) {
+                       err = TIPC_ERR_NO_PORT;
+                       goto reject;
+               }
+       } else if (unlikely(!filter_connect(tsk, skb))) {
+               err = TIPC_ERR_NO_PORT;
+               goto reject;
        }
 
        /* Reject message if there isn't room to queue it */
-       if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
-               return -TIPC_ERR_OVERLOAD;
+       if (unlikely(sk_rmem_alloc_get(sk) + skb->truesize >= limit)) {
+               err = TIPC_ERR_OVERLOAD;
+               goto reject;
+       }
 
        /* Enqueue message */
-       TIPC_SKB_CB(*skb)->handle = NULL;
-       __skb_queue_tail(&sk->sk_receive_queue, *skb);
-       skb_set_owner_r(*skb, sk);
+       TIPC_SKB_CB(skb)->handle = NULL;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       skb_set_owner_r(skb, sk);
 
        sk->sk_data_ready(sk);
-       *skb = NULL;
-       return TIPC_OK;
+       return true;
+
+reject:
+       tipc_sk_respond(sk, skb, err);
+       return false;
 }
 
 /**
@@ -1685,22 +1706,10 @@ static int filter_rcv(struct sock *sk, struct sk_buff **skb)
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
-       int err;
-       atomic_t *dcnt;
-       u32 dnode;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       struct net *net = sock_net(sk);
-       uint truesize = skb->truesize;
+       unsigned int truesize = skb->truesize;
 
-       err = filter_rcv(sk, &skb);
-       if (likely(!skb)) {
-               dcnt = &tsk->dupl_rcvcnt;
-               if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-                       atomic_add(truesize, dcnt);
-               return 0;
-       }
-       if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
-               tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+       if (likely(filter_rcv(sk, skb)))
+               atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
        return 0;
 }
 
@@ -1710,45 +1719,43 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
  * @inputq: list of incoming buffers with potentially different destinations
  * @sk: socket where the buffers should be enqueued
  * @dport: port number for the socket
- * @_skb: returned buffer to be forwarded or rejected, if applicable
  *
  * Caller must hold socket lock
- *
- * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
- * or -TIPC_ERR_NO_PORT
  */
-static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
-                          u32 dport, struct sk_buff **_skb)
+static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
+                           u32 dport)
 {
        unsigned int lim;
        atomic_t *dcnt;
-       int err;
        struct sk_buff *skb;
        unsigned long time_limit = jiffies + 2;
 
        while (skb_queue_len(inputq)) {
                if (unlikely(time_after_eq(jiffies, time_limit)))
-                       return TIPC_OK;
+                       return;
+
                skb = tipc_skb_dequeue(inputq, dport);
                if (unlikely(!skb))
-                       return TIPC_OK;
+                       return;
+
+               /* Add message directly to receive queue if possible */
                if (!sock_owned_by_user(sk)) {
-                       err = filter_rcv(sk, &skb);
-                       if (likely(!skb))
-                               continue;
-                       *_skb = skb;
-                       return err;
+                       filter_rcv(sk, skb);
+                       continue;
                }
+
+               /* Try backlog, compensating for double-counted bytes */
                dcnt = &tipc_sk(sk)->dupl_rcvcnt;
                if (sk->sk_backlog.len)
                        atomic_set(dcnt, 0);
                lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
                if (likely(!sk_add_backlog(sk, skb, lim)))
                        continue;
-               *_skb = skb;
-               return -TIPC_ERR_OVERLOAD;
+
+               /* Overload => reject message back to sender */
+               tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+               break;
        }
-       return TIPC_OK;
 }
 
 /**
@@ -1756,49 +1763,46 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
  * @inputq: buffer list containing the buffers
  * Consumes all buffers in list until inputq is empty
  * Note: may be called in multiple threads referring to the same queue
- * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
- * Only node local calls check the return value, sending single-buffer queues
  */
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
 {
        u32 dnode, dport = 0;
        int err;
-       struct sk_buff *skb;
        struct tipc_sock *tsk;
-       struct tipc_net *tn;
        struct sock *sk;
+       struct sk_buff *skb;
 
        while (skb_queue_len(inputq)) {
-               err = -TIPC_ERR_NO_PORT;
-               skb = NULL;
                dport = tipc_skb_peek_port(inputq, dport);
                tsk = tipc_sk_lookup(net, dport);
+
                if (likely(tsk)) {
                        sk = &tsk->sk;
                        if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
-                               err = tipc_sk_enqueue(inputq, sk, dport, &skb);
+                               tipc_sk_enqueue(inputq, sk, dport);
                                spin_unlock_bh(&sk->sk_lock.slock);
-                               dport = 0;
                        }
                        sock_put(sk);
-               } else {
-                       skb = tipc_skb_dequeue(inputq, dport);
-               }
-               if (likely(!skb))
                        continue;
-               if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
-                       goto xmit;
-               if (!err) {
-                       dnode = msg_destnode(buf_msg(skb));
-                       goto xmit;
                }
-               tn = net_generic(net, tipc_net_id);
-               if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
+
+               /* No destination socket => dequeue skb if still there */
+               skb = tipc_skb_dequeue(inputq, dport);
+               if (!skb)
+                       return;
+
+               /* Try secondary lookup if unresolved named message */
+               err = TIPC_ERR_NO_PORT;
+               if (tipc_msg_lookup_dest(net, skb, &err))
+                       goto xmit;
+
+               /* Prepare for message rejection */
+               if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
                        continue;
 xmit:
-               tipc_link_xmit_skb(net, skb, dnode, dport);
+               dnode = msg_destnode(buf_msg(skb));
+               tipc_node_xmit_skb(net, skb, dnode, dport);
        }
-       return err ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -2067,7 +2071,10 @@ static int tipc_shutdown(struct socket *sock, int how)
        struct net *net = sock_net(sk);
        struct tipc_sock *tsk = tipc_sk(sk);
        struct sk_buff *skb;
-       u32 dnode;
+       u32 dnode = tsk_peer_node(tsk);
+       u32 dport = tsk_peer_port(tsk);
+       u32 onode = tipc_own_addr(net);
+       u32 oport = tsk->portid;
        int res;
 
        if (how != SHUT_RDWR)
@@ -2080,6 +2087,8 @@ static int tipc_shutdown(struct socket *sock, int how)
        case SS_CONNECTED:
 
 restart:
+               dnode = tsk_peer_node(tsk);
+
                /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
                skb = __skb_dequeue(&sk->sk_receive_queue);
                if (skb) {
@@ -2087,19 +2096,13 @@ restart:
                                kfree_skb(skb);
                                goto restart;
                        }
-                       if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
-                                            TIPC_CONN_SHUTDOWN))
-                               tipc_link_xmit_skb(net, skb, dnode,
-                                                  tsk->portid);
+                       tipc_sk_respond(sk, skb, TIPC_CONN_SHUTDOWN);
                } else {
-                       dnode = tsk_peer_node(tsk);
-
                        skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                              TIPC_CONN_MSG, SHORT_H_SIZE,
-                                             0, dnode, tsk_own_node(tsk),
-                                             tsk_peer_port(tsk),
-                                             tsk->portid, TIPC_CONN_SHUTDOWN);
-                       tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
+                                             0, dnode, onode, dport, oport,
+                                             TIPC_CONN_SHUTDOWN);
+                       tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
                }
                tsk->connected = 0;
                sock->state = SS_DISCONNECTING;
@@ -2161,7 +2164,7 @@ static void tipc_sk_timeout(unsigned long data)
        }
        bh_unlock_sock(sk);
        if (skb)
-               tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
+               tipc_node_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
 exit:
        sock_put(sk);
 }
index bf6551389522dfda37fb0eff4bb5d15221bb2b91..4241f22069dc93270f9760c2f30ec9d36ede84d7 100644 (file)
@@ -44,7 +44,7 @@
                                  SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
 int tipc_socket_init(void);
 void tipc_socket_stop(void);
-int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
+void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
 void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq);
 void tipc_sk_reinit(struct net *net);
index 66deebc66aa10820880bd51839bd2ae379eaeb86..c170d3138953a2361df5439aeffadd29afa52ad9 100644 (file)
@@ -194,7 +194,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                        .saddr = src->ipv6,
                        .flowi6_proto = IPPROTO_UDP
                };
-               err = ipv6_stub->ipv6_dst_lookup(ub->ubsock->sk, &ndst, &fl6);
+               err = ipv6_stub->ipv6_dst_lookup(net, ub->ubsock->sk, &ndst,
+                                                &fl6);
                if (err)
                        goto tx_error;
                ttl = ip6_dst_hoplimit(ndst);
index bd16c6c7e1e7660b8ce183c72b16eb65aa51af5b..0cebf1fc37a2743ba096747056fab6c927922b23 100644 (file)
@@ -2048,7 +2048,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
                xfrm_audit_policy_delete(xp, 1, true);
        } else {
                // reset the timers here?
-               WARN(1, "Dont know what to do with soft policy expire\n");
+               WARN(1, "Don't know what to do with soft policy expire\n");
        }
        km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);
 
index bdf1c1607b808e9ac0d7ada164a25694c56b6965..c77c872fe8ee477c7cfabf5fd539824fc676b173 100644 (file)
@@ -60,4 +60,29 @@ static int (*bpf_l3_csum_replace)(void *ctx, int off, int from, int to, int flag
 static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flags) =
        (void *) BPF_FUNC_l4_csum_replace;
 
+#if defined(__x86_64__)
+
+#define PT_REGS_PARM1(x) ((x)->di)
+#define PT_REGS_PARM2(x) ((x)->si)
+#define PT_REGS_PARM3(x) ((x)->dx)
+#define PT_REGS_PARM4(x) ((x)->cx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->sp)
+#define PT_REGS_FP(x) ((x)->bp)
+#define PT_REGS_RC(x) ((x)->ax)
+#define PT_REGS_SP(x) ((x)->sp)
+
+#elif defined(__s390x__)
+
+#define PT_REGS_PARM1(x) ((x)->gprs[2])
+#define PT_REGS_PARM2(x) ((x)->gprs[3])
+#define PT_REGS_PARM3(x) ((x)->gprs[4])
+#define PT_REGS_PARM4(x) ((x)->gprs[5])
+#define PT_REGS_PARM5(x) ((x)->gprs[6])
+#define PT_REGS_RET(x) ((x)->gprs[14])
+#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_RC(x) ((x)->gprs[2])
+#define PT_REGS_SP(x) ((x)->gprs[15])
+
+#endif
 #endif
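
Only x86-64 and s390x are covered by this hunk; another JIT-capable architecture would slot in as a further #elif branch. As an illustration (not part of this patch), an arm64 mapping following the AAPCS64 calling convention might look like:

	#elif defined(__aarch64__)

	/* Hypothetical arm64 branch -- regs[0..7] carry the arguments,
	 * regs[29]/regs[30] are the frame pointer and link register.
	 */
	#define PT_REGS_PARM1(x) ((x)->regs[0])
	#define PT_REGS_PARM2(x) ((x)->regs[1])
	#define PT_REGS_PARM3(x) ((x)->regs[2])
	#define PT_REGS_PARM4(x) ((x)->regs[3])
	#define PT_REGS_PARM5(x) ((x)->regs[4])
	#define PT_REGS_RET(x) ((x)->regs[30])
	#define PT_REGS_FP(x) ((x)->regs[29]) /* works only with CONFIG_FRAME_POINTER */
	#define PT_REGS_RC(x) ((x)->regs[0])
	#define PT_REGS_SP(x) ((x)->sp)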
index 693605997abcbb9eb7d069d2ec57b8046579263b..ee0f110c9c543b54fd8593607d161598fb29e472 100644 (file)
@@ -822,6 +822,65 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
+       {
+               "PTR_TO_STACK store/load",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "PTR_TO_STACK store/load - bad alignment on off",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned access off -6 size 8",
+       },
+       {
+               "PTR_TO_STACK store/load - bad alignment on reg",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned access off -2 size 8",
+       },
+       {
+               "PTR_TO_STACK store/load - out of bounds low",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off=-79992 size=8",
+       },
+       {
+               "PTR_TO_STACK store/load - out of bounds high",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack off=0 size=8",
+       },
 };
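
Read as arithmetic on the effective stack offset (the frame pointer in R10 is 8-byte aligned, and the BPF stack is MAX_BPF_STACK = 512 bytes), the five new cases resolve as:

	fp - 10 + off 2  ->  off -8      8-byte aligned            ACCEPT
	fp - 8  + off 2  ->  off -6      "misaligned access off -6 size 8"
	fp - 10 + off 8  ->  off -2      "misaligned access off -2 size 8"
	fp - 80000 + 8   ->  off -79992  below the 512-byte stack  REJECT
	fp - 8  + off 8  ->  off 0       at/above fp               REJECT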
 
 static int probe_filter_length(struct bpf_insn *fp)
index 31620463701a55edc0afc54d45a5cabaef890410..3f450a8fa1f3487b2cf2d9b261f63dc3d25f6599 100644 (file)
@@ -29,7 +29,7 @@ int bpf_prog1(struct pt_regs *ctx)
        int len;
 
        /* non-portable! works for the given kernel only */
-       skb = (struct sk_buff *) ctx->di;
+       skb = (struct sk_buff *) PT_REGS_PARM1(ctx);
 
        dev = _(skb->dev);
 
index dc50f4f2943f937d5c389e5384d39a0153d3f273..b32367cfbff4aff3020bb9c36c8faf6c981dd0f8 100644 (file)
@@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx)
        long init_val = 1;
        long *value;
 
-       /* x64 specific: read ip of kfree_skb caller.
+       /* x64/s390x specific: read ip of kfree_skb caller.
         * non-portable version of __builtin_return_address(0)
         */
-       bpf_probe_read(&loc, sizeof(loc), (void *)ctx->sp);
+       bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx));
 
        value = bpf_map_lookup_elem(&my_map, &loc);
        if (value)
@@ -79,7 +79,7 @@ struct bpf_map_def SEC("maps") my_hist_map = {
 SEC("kprobe/sys_write")
 int bpf_prog3(struct pt_regs *ctx)
 {
-       long write_size = ctx->dx; /* arg3 */
+       long write_size = PT_REGS_PARM3(ctx);
        long init_val = 1;
        long *value;
        struct hist_key key = {};
index 255ff27923666a844e3cb2881e25661278c7c1d6..bf337fbb09472cbe32bfbaff2d4313b7cafb58c6 100644 (file)
@@ -23,7 +23,7 @@ struct bpf_map_def SEC("maps") my_map = {
 SEC("kprobe/blk_mq_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
-       long rq = ctx->di;
+       long rq = PT_REGS_PARM1(ctx);
        u64 val = bpf_ktime_get_ns();
 
        bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY);
@@ -51,7 +51,7 @@ struct bpf_map_def SEC("maps") lat_map = {
 SEC("kprobe/blk_update_request")
 int bpf_prog2(struct pt_regs *ctx)
 {
-       long rq = ctx->di;
+       long rq = PT_REGS_PARM1(ctx);
        u64 *value, l, base;
        u32 index;
 
index 126b80512228aa6493c8332e75933ab5852acab1..ac4671420cf15949c4087b8c2847c69978646650 100644 (file)
@@ -27,7 +27,7 @@ struct bpf_map_def SEC("maps") my_map = {
 SEC("kprobe/kmem_cache_free")
 int bpf_prog1(struct pt_regs *ctx)
 {
-       long ptr = ctx->si;
+       long ptr = PT_REGS_PARM2(ctx);
 
        bpf_map_delete_elem(&my_map, &ptr);
        return 0;
@@ -36,11 +36,11 @@ int bpf_prog1(struct pt_regs *ctx)
 SEC("kretprobe/kmem_cache_alloc_node")
 int bpf_prog2(struct pt_regs *ctx)
 {
-       long ptr = ctx->ax;
+       long ptr = PT_REGS_RC(ctx);
        long ip = 0;
 
        /* get ip address of kmem_cache_alloc_node() caller */
-       bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip)));
+       bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip)));
 
        struct pair v = {
                .val = bpf_ktime_get_ns(),
index b71fe07a7a7a4820a77e7cb0d5a112febdbaa49c..b3f4295bf288536c1f9ae7500b542a77cf8aaec1 100644 (file)
@@ -24,7 +24,7 @@ int bpf_prog1(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
 
        /* dispatch into next BPF program depending on syscall number */
        bpf_tail_call(ctx, &progs, sd.nr);
@@ -42,7 +42,7 @@ PROG(__NR_write)(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] == 512) {
                char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
@@ -55,7 +55,7 @@ PROG(__NR_read)(struct pt_regs *ctx)
 {
        struct seccomp_data sd = {};
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)ctx->di);
+       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM1(ctx));
        if (sd.args[2] > 128 && sd.args[2] <= 1024) {
                char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
index 618c2bcd4eabc6143b0e7f0431f57b8620101fe5..2cd3d4c997383af3fde9ed090eb5d9ab32a7e467 100644 (file)
 #include <string.h>
 #include <bfd.h>
 #include <dis-asm.h>
+#include <regex.h>
+#include <fcntl.h>
 #include <sys/klog.h>
 #include <sys/types.h>
-#include <regex.h>
+#include <sys/stat.h>
+
+#define CMD_ACTION_SIZE_BUFFER         10
+#define CMD_ACTION_READ_ALL            3
 
 static void get_exec_path(char *tpath, size_t size)
 {
@@ -87,20 +92,66 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
        bfd_close(bfdf);
 }
 
-static char *get_klog_buff(int *klen)
+static char *get_klog_buff(unsigned int *klen)
 {
-       int ret, len = klogctl(10, NULL, 0);
-       char *buff = malloc(len);
+       int ret, len;
+       char *buff;
+
+       len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+       buff = malloc(len);
+       if (!buff)
+               return NULL;
+
+       ret = klogctl(CMD_ACTION_READ_ALL, buff, len);
+       if (ret < 0) {
+               free(buff);
+               return NULL;
+       }
 
-       assert(buff && klen);
-       ret = klogctl(3, buff, len);
-       assert(ret >= 0);
        *klen = ret;
+       return buff;
+}
 
+static char *get_flog_buff(const char *file, unsigned int *klen)
+{
+       int fd, ret, len;
+       struct stat fi;
+       char *buff;
+
+       fd = open(file, O_RDONLY);
+       if (fd < 0)
+               return NULL;
+
+       ret = fstat(fd, &fi);
+       if (ret < 0 || !S_ISREG(fi.st_mode))
+               goto out;
+
+       len = fi.st_size + 1;
+       buff = malloc(len);
+       if (!buff)
+               goto out;
+
+       memset(buff, 0, len);
+       ret = read(fd, buff, len - 1);
+       if (ret <= 0)
+               goto out_free;
+
+       close(fd);
+       *klen = ret;
        return buff;
+out_free:
+       free(buff);
+out:
+       close(fd);
+       return NULL;
+}
+
+static char *get_log_buff(const char *file, unsigned int *klen)
+{
+       return file ? get_flog_buff(file, klen) : get_klog_buff(klen);
 }
 
-static void put_klog_buff(char *buff)
+static void put_log_buff(char *buff)
 {
        free(buff);
 }
@@ -138,8 +189,10 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
        ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
                     &flen, &proglen, &pass, &base);
-       if (ret != 4)
+       if (ret != 4) {
+               regfree(&regex);
                return 0;
+       }
 
        tmp = ptr = haystack + off;
        while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
@@ -169,31 +222,49 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        return ulen;
 }
 
+static void usage(void)
+{
+       printf("Usage: bpf_jit_disasm [...]\n");
+       printf("       -o          Also display related opcodes (default: off).\n");
+       printf("       -f <file>   Read last image dump from file or stdin (default: klog).\n");
+       printf("       -h          Display this help.\n");
+}
+
 int main(int argc, char **argv)
 {
-       int len, klen, opcodes = 0;
-       char *kbuff;
+       unsigned int len, klen, opt, opcodes = 0;
        static uint8_t image[32768];
+       char *kbuff, *file = NULL;
 
-       if (argc > 1) {
-               if (!strncmp("-o", argv[argc - 1], 2)) {
+       while ((opt = getopt(argc, argv, "of:")) != -1) {
+               switch (opt) {
+               case 'o':
                        opcodes = 1;
-               } else {
-                       printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
-                       exit(0);
+                       break;
+               case 'f':
+                       file = optarg;
+                       break;
+               default:
+                       usage();
+                       return -1;
                }
        }
 
        bfd_init();
        memset(image, 0, sizeof(image));
 
-       kbuff = get_klog_buff(&klen);
+       kbuff = get_log_buff(file, &klen);
+       if (!kbuff) {
+               fprintf(stderr, "Could not retrieve log buffer!\n");
+               return -1;
+       }
 
        len = get_last_jit_image(kbuff, klen, image, sizeof(image));
        if (len > 0)
                get_asm_insns(image, len, opcodes);
+       else
+               fprintf(stderr, "No JIT image found!\n");
 
-       put_klog_buff(kbuff);
-
+       put_log_buff(kbuff);
        return 0;
 }
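
For reference (not part of the patch), typical invocations after this change, assuming the JIT was put into debug mode with `echo 2 > /proc/sys/net/core/bpf_jit_enable` so that images are dumped to the kernel log:

	bpf_jit_disasm              # disassemble the last JIT image from the kernel log
	bpf_jit_disasm -o           # additionally print the raw opcodes
	bpf_jit_disasm -f dump.txt  # read the last image dump from a saved file instead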